From noreply at buildbot.pypy.org Thu May 1 00:24:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 00:24:46 +0200 (CEST) Subject: [pypy-commit] pypy default: fix some numpy dtype_from_spec cases Message-ID: <20140430222446.5491B1C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71107:a951d09ad58f Date: 2014-04-30 18:23 -0400 http://bitbucket.org/pypy/pypy/changeset/a951d09ad58f/ Log: fix some numpy dtype_from_spec cases diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -1,3 +1,4 @@ +import string from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -497,7 +498,9 @@ return w_dtype elif space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) - if ',' in name: + if ',' in name or \ + name[0] in string.digits or \ + name[0] in '<>=|' and name[1] in string.digits: return dtype_from_spec(space, w_dtype) cname = name[1:] if name[0] == NPY.OPPBYTE else name try: From noreply at buildbot.pypy.org Thu May 1 00:31:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 00:31:15 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: add tests Message-ID: <20140430223115.AB9BC1C05CE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71108:f5c53e131b78 Date: 2014-05-01 01:23 +0300 http://bitbucket.org/pypy/pypy/changeset/f5c53e131b78/ Log: add tests diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -61,6 +61,41 @@ py.test.fail("could not find the localhost address in %r" % (address_list,)) +def test_thread_safe_gethostbyname_ex(): + import threading + nthreads = 10 + domain = 'google.com' + result = [0] * nthreads + threads = [None] * 
nthreads + def lookup_name(i): + name, aliases, address_list = gethostbyname_ex(domain) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_name, args=[i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + +def test_thread_safe_gethostbyaddr(): + import threading + nthreads = 10 + ip = '8.8.8.8' + domain = gethostbyaddr(ip)[0] + result = [0] * nthreads + threads = [None] * nthreads + def lookup_addr(ip, i): + name, aliases, address_list = gethostbyaddr(ip) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + def test_gethostbyaddr(): try: cpy_socket.gethostbyaddr("::1") From noreply at buildbot.pypy.org Thu May 1 00:31:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 00:31:16 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: add locks and copy hostent. now it segfaults, add print to see where Message-ID: <20140430223116.EF3A61C05CE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71109:df59574c2222 Date: 2014-05-01 01:29 +0300 http://bitbucket.org/pypy/pypy/changeset/df59574c2222/ Log: add locks and copy hostent. 
now it segfaults, add print to see where diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -30,7 +30,7 @@ 'stdio.h', 'netdb.h', 'arpa/inet.h', - 'stdint.h', + 'stdint.h', 'errno.h', ) if _HAS_AF_PACKET: @@ -139,7 +139,7 @@ EAI_SOCKTYPE EAI_SYSTEM IPPROTO_AH IPPROTO_BIP IPPROTO_DSTOPTS IPPROTO_EGP IPPROTO_EON IPPROTO_ESP -IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS +IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS IPPROTO_ICMPV6 IPPROTO_IDP IPPROTO_IGMP IPPROTO_IPCOMP IPPROTO_IPIP IPPROTO_IPV4 IPPROTO_IPV6 IPPROTO_MAX IPPROTO_MOBILE IPPROTO_ND IPPROTO_NONE IPPROTO_PIM IPPROTO_PUP IPPROTO_ROUTING IPPROTO_RSVP IPPROTO_TCP IPPROTO_TP @@ -174,7 +174,7 @@ SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM -SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE +SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE SO_ACCEPTCONN SO_BROADCAST SO_DEBUG SO_DONTROUTE SO_ERROR SO_EXCLUSIVEADDRUSE SO_KEEPALIVE SO_LINGER SO_OOBINLINE SO_RCVBUF SO_RCVLOWAT SO_RCVTIMEO @@ -286,7 +286,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), @@ -447,6 +447,7 @@ #in_addr_size = sizeof(in_addr) in6_addr = cConfig.in6_addr addrinfo = cConfig.addrinfo +hostent = cConfig.hostent if _POSIX: nfds_t = cConfig.nfds_t pollfd = cConfig.pollfd diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -20,6 +20,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof +from rpython.rlib import rthread INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): @@ -29,6 +30,8 @@ constants = _c.constants locals().update(constants) # 
Define constants from _c +ll_locks = {} + if _c.WIN32: from rpython.rlib import rwin32 def rsocket_startup(): @@ -38,9 +41,12 @@ assert res == 0 finally: lltype.free(wsadata, flavor='raw') + ll_locks['gethostbyname'] = rthread.allocate_lock() + ll_locks['gethostbyaddr'] = rthread.allocate_lock() else: def rsocket_startup(): - pass + ll_locks['gethostbyname'] = rthread.allocate_lock() + ll_locks['gethostbyaddr'] = rthread.allocate_lock() def ntohs(x): @@ -1125,21 +1131,31 @@ return (rffi.charp2str(hostent.c_h_name), aliases, address_list) def gethostbyname_ex(name): - # XXX use gethostbyname_r() if available, and/or use locks if not - addr = gethostbyname(name) - hostent = _c.gethostbyname(name) - return gethost_common(name, hostent, addr) + # XXX use gethostbyname_r() if available instead of locks + with lltype.scoped_alloc(rffi.CArray(_c.hostent), 1) as hostent: + addr = gethostbyname(name) + ll_locks['gethostbyname'].acquire(True) + _hostent = _c.gethostbyname(name) + if not _hostent: + raise HSocketError(name) + rffi.structcopy(hostent[0], _hostent) + ll_locks['gethostbyname'].release() + return gethost_common(name, hostent[0], addr) def gethostbyaddr(ip): - # XXX use gethostbyaddr_r() if available, and/or use locks if not + # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) p, size = addr.lock_in_addr() try: - hostent = _c.gethostbyaddr(p, size, addr.family) + with lltype.scoped_alloc(rffi.CArray(_c.hostent), 1) as hostent: + ll_locks['gethostbyaddr'].acquire(True) + _hostent = _c.gethostbyaddr(p, size, addr.family) + rffi.structcopy(hostent[0], _hostent) + ll_locks['gethostbyaddr'].release() + return gethost_common(ip, hostent[0], addr) finally: addr.unlock() - return gethost_common(ip, hostent, addr) def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- 
a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -67,21 +67,27 @@ domain = 'google.com' result = [0] * nthreads threads = [None] * nthreads + print 'starting', 70 def lookup_name(i): name, aliases, address_list = gethostbyname_ex(domain) if name == domain: result[i] += 1 + print 'done',i,75 for i in range(nthreads): threads[i] = threading.Thread(target = lookup_name, args=[i]) threads[i].start() + print 'threads', 78 + print 'done', 79 for i in range(nthreads): threads[i].join() assert sum(result) == nthreads + print 'done', 82 def test_thread_safe_gethostbyaddr(): import threading nthreads = 10 ip = '8.8.8.8' + print 'starting', 87 domain = gethostbyaddr(ip)[0] result = [0] * nthreads threads = [None] * nthreads @@ -92,6 +98,7 @@ for i in range(nthreads): threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) threads[i].start() + print 'threads', 98 for i in range(nthreads): threads[i].join() assert sum(result) == nthreads From noreply at buildbot.pypy.org Thu May 1 01:32:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 01:32:40 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: cleanup Message-ID: <20140430233240.26A781D2B59@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71110:1505774100fd Date: 2014-04-30 19:24 -0400 http://bitbucket.org/pypy/pypy/changeset/1505774100fd/ Log: cleanup diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -447,7 +447,6 @@ #in_addr_size = sizeof(in_addr) in6_addr = cConfig.in6_addr addrinfo = cConfig.addrinfo -hostent = cConfig.hostent if _POSIX: nfds_t = cConfig.nfds_t pollfd = cConfig.pollfd diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1132,30 +1132,22 @@ def gethostbyname_ex(name): # XXX use gethostbyname_r() if available instead of locks - with 
lltype.scoped_alloc(rffi.CArray(_c.hostent), 1) as hostent: - addr = gethostbyname(name) - ll_locks['gethostbyname'].acquire(True) - _hostent = _c.gethostbyname(name) - if not _hostent: - raise HSocketError(name) - rffi.structcopy(hostent[0], _hostent) - ll_locks['gethostbyname'].release() - return gethost_common(name, hostent[0], addr) + addr = gethostbyname(name) + with ll_locks['gethostbyname']: + hostent = _c.gethostbyname(name) + return gethost_common(name, hostent, addr) def gethostbyaddr(ip): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - p, size = addr.lock_in_addr() - try: - with lltype.scoped_alloc(rffi.CArray(_c.hostent), 1) as hostent: - ll_locks['gethostbyaddr'].acquire(True) - _hostent = _c.gethostbyaddr(p, size, addr.family) - rffi.structcopy(hostent[0], _hostent) - ll_locks['gethostbyaddr'].release() - return gethost_common(ip, hostent[0], addr) - finally: - addr.unlock() + with ll_locks['gethostbyaddr']: + p, size = addr.lock_in_addr() + try: + hostent = _c.gethostbyaddr(p, size, addr.family) + finally: + addr.unlock() + return gethost_common(ip, hostent, addr) def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, From noreply at buildbot.pypy.org Thu May 1 01:50:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 01:50:09 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20140430235009.CC6C11D2C2C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71111:6dcb8c27710f Date: 2014-04-30 19:49 -0400 http://bitbucket.org/pypy/pypy/changeset/6dcb8c27710f/ Log: typo diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py --- a/rpython/jit/metainterp/test/test_string.py +++ b/rpython/jit/metainterp/test/test_string.py @@ -478,7 +478,7 @@ return len(sa.val) assert self.meta_interp(f, ['a']) == f('a') - def test_string_comepare_quasiimmutable(self): + 
def test_string_compare_quasiimmutable(self): class Sys(object): _immutable_fields_ = ["defaultencoding?"] def __init__(self, s): From noreply at buildbot.pypy.org Thu May 1 01:53:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 01:53:54 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 3f8b9a32c444, broke tests Message-ID: <20140430235354.7891E1D2C2C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71112:43fa812a3a88 Date: 2014-04-30 19:53 -0400 http://bitbucket.org/pypy/pypy/changeset/43fa812a3a88/ Log: backout 3f8b9a32c444, broke tests http://buildbot.pypy.org/summary?b ranch=%3Ctrunk%3E&recentrev=71106:fc261cbeb029 diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,28 +57,6 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): - # Step 1: check if all arguments are constant - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - break - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - result = self.optimizer.call_pure_results[arg_consts] - except KeyError: - pass - else: - # this removes a CALL_PURE with all constant arguments. - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return - - # Step 2: check if all arguments are the same as a previous - # CALL_PURE. 
args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -515,9 +515,30 @@ return True # 0-length arraycopy return False + def optimize_CALL_PURE(self, op): + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.get_constant_box(arg) + if const is None: + break + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + result = self.optimizer.call_pure_results[arg_consts] + except KeyError: + pass + else: + # this removes a CALL_PURE with all constant arguments. + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return + self.emit_operation(op) + def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: - # it was a CALL_LOOPINVARIANT that was killed; + # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; # so we also kill the following GUARD_NO_EXCEPTION return self.emit_operation(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,6 +5164,7 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): + py.test.skip("implement me") ops = """ [] p0 = newstr(3) From noreply at buildbot.pypy.org Thu May 1 01:59:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 01:59:21 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140430235921.632331D2C2D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: 
r71113:388f7ec7a85b Date: 2014-04-30 19:58 -0400 http://bitbucket.org/pypy/pypy/changeset/388f7ec7a85b/ Log: cleanup diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -208,15 +208,11 @@ def create_spec_for_function(space, w_func): - if w_func.w_module is None: - module = '' - else: + if w_func.w_module is not None: module = space.str_w(w_func.w_module) - if module == '__builtin__': - module = '' - else: - module += '.' - return '{%s%s}' % (module, w_func.name) + if module != '__builtin__': + return '{%s.%s}' % (module, w_func.name) + return '{%s}' % w_func.name def create_spec_for_object(space, w_obj): From noreply at buildbot.pypy.org Thu May 1 03:24:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 03:24:41 +0200 (CEST) Subject: [pypy-commit] pypy default: properly handle dtype commastrings Message-ID: <20140501012441.F29651C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71114:e00029917beb Date: 2014-04-30 21:17 -0400 http://bitbucket.org/pypy/pypy/changeset/e00029917beb/ Log: properly handle dtype commastrings diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -469,6 +469,23 @@ return dtype_from_list(space, w_lst, True) +def _check_for_commastring(s): + if s[0] in string.digits or s[0] in '<>=|' and s[1] in string.digits: + return True + if s[0] == '(' and s[1] == ')' or s[0] in '<>=|' and s[1] == '(' and s[2] == ')': + return True + sqbracket = 0 + for c in s: + if c == ',': + if sqbracket == 0: + return True + elif c == '[': + sqbracket += 1 + elif c == ']': + sqbracket -= 1 + return False + + def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) 
@@ -498,9 +515,7 @@ return w_dtype elif space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) - if ',' in name or \ - name[0] in string.digits or \ - name[0] in '<>=|' and name[1] in string.digits: + if _check_for_commastring(name): return dtype_from_spec(space, w_dtype) cname = name[1:] if name[0] == NPY.OPPBYTE else name try: From noreply at buildbot.pypy.org Thu May 1 06:05:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 06:05:21 +0200 (CEST) Subject: [pypy-commit] pypy default: support numpy.dtype('a#') Message-ID: <20140501040521.922021C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71115:d76f8f5ea228 Date: 2014-04-30 23:17 -0400 http://bitbucket.org/pypy/pypy/changeset/d76f8f5ea228/ Log: support numpy.dtype('a#') diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -526,7 +526,7 @@ if name[0] == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) return dtype - if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': + if name[0] in 'VSUca' or name[0] in '<>=|' and name[1] in 'VSUca': return variable_dtype(space, name) raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) elif space.isinstance_w(w_dtype, space.w_list): @@ -607,7 +607,7 @@ raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: return new_string_dtype(space, 1, NPY.CHARLTR) - elif char == NPY.STRINGLTR: + elif char == NPY.STRINGLTR or char == NPY.STRINGLTR2: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -191,6 +191,9 @@ d = dtype('S5') assert repr(d) == "dtype('S5')" assert str(d) == "|S5" + d 
= dtype('a5') + assert repr(d) == "dtype('S5')" + assert str(d) == "|S5" d = dtype('U5') assert repr(d) == "dtype('%sU5')" % b assert str(d) == "%sU5" % b From noreply at buildbot.pypy.org Thu May 1 06:05:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 06:05:22 +0200 (CEST) Subject: [pypy-commit] pypy default: support ndarray.clip with only one of min or max Message-ID: <20140501040522.C625E1C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71116:891a19edd076 Date: 2014-04-30 23:51 -0400 http://bitbucket.org/pypy/pypy/changeset/891a19edd076/ Log: support ndarray.clip with only one of min or max diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -29,9 +29,11 @@ if not space.is_none(out): return out - dtype = w_arr_list[0].get_dtype() - for w_arr in w_arr_list[1:]: - dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) + dtype = None + for w_arr in w_arr_list: + if not space.is_none(w_arr): + dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) + assert dtype is not None out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -606,25 +606,34 @@ reds = 'auto') def clip(space, arr, shape, min, max, out): + assert min or max arr_iter, arr_state = arr.create_iter(shape) + if min is not None: + min_iter, min_state = min.create_iter(shape) + else: + min_iter, min_state = None, None + if max is not None: + max_iter, max_state = max.create_iter(shape) + else: + max_iter, max_state = None, None + out_iter, out_state = out.create_iter(shape) + shapelen = len(shape) dtype = out.get_dtype() - shapelen = len(shape) - min_iter, min_state = min.create_iter(shape) - max_iter, max_state = max.create_iter(shape) - 
out_iter, out_state = out.create_iter(shape) while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) - w_min = min_iter.getitem(min_state).convert_to(space, dtype) - w_max = max_iter.getitem(max_state).convert_to(space, dtype) - if dtype.itemtype.lt(w_v, w_min): - w_v = w_min - elif dtype.itemtype.gt(w_v, w_max): - w_v = w_max + arr_state = arr_iter.next(arr_state) + if min_iter is not None: + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + if dtype.itemtype.lt(w_v, w_min): + w_v = w_min + min_state = min_iter.next(min_state) + if max_iter is not None: + w_max = max_iter.getitem(max_state).convert_to(space, dtype) + if dtype.itemtype.gt(w_v, w_max): + w_v = w_max + max_state = max_iter.next(max_state) out_iter.setitem(out_state, w_v) - arr_state = arr_iter.next(arr_state) - min_state = min_iter.next(min_state) - max_state = max_iter.next(max_state) out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -593,17 +593,25 @@ def descr_choose(self, space, w_choices, w_out=None, w_mode=None): return choose(space, self, w_choices, w_out, w_mode) - def descr_clip(self, space, w_min, w_max, w_out=None): + def descr_clip(self, space, w_min=None, w_max=None, w_out=None): + if space.is_none(w_min): + w_min = None + else: + w_min = convert_to_array(space, w_min) + if space.is_none(w_max): + w_max = None + else: + w_max = convert_to_array(space, w_max) if space.is_none(w_out): w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - min = convert_to_array(space, w_min) - max = convert_to_array(space, w_max) - shape = shape_agreement_multiple(space, [self, min, max, w_out]) - out 
= descriptor.dtype_agreement(space, [self, min, max], shape, w_out) - loop.clip(space, self, shape, min, max, out) + if not w_min and not w_max: + raise oefmt(space.w_ValueError, "One of max or min must be given.") + shape = shape_agreement_multiple(space, [self, w_min, w_max, w_out]) + out = descriptor.dtype_agreement(space, [self, w_min, w_max], shape, w_out) + loop.clip(space, self, shape, w_min, w_max, out) return out def descr_get_ctypes(self, space): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2229,7 +2229,13 @@ def test_clip(self): from numpypy import array a = array([1, 2, 17, -3, 12]) + exc = raises(ValueError, a.clip) + assert str(exc.value) == "One of max or min must be given." assert (a.clip(-2, 13) == [1, 2, 13, -2, 12]).all() + assert (a.clip(min=-2) == [1, 2, 17, -2, 12]).all() + assert (a.clip(min=-2, max=None) == [1, 2, 17, -2, 12]).all() + assert (a.clip(max=13) == [1, 2, 13, -3, 12]).all() + assert (a.clip(min=None, max=13) == [1, 2, 13, -3, 12]).all() assert (a.clip(-1, 1, out=None) == [1, 1, 1, -1, 1]).all() assert (a == [1, 2, 17, -3, 12]).all() assert (a.clip(-1, [1, 2, 3, 4, 5]) == [1, 2, 3, -1, 5]).all() diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -477,6 +477,8 @@ promote_bools=False): if dt2 is None: return dt1 + if dt1 is None: + return dt2 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 From noreply at buildbot.pypy.org Thu May 1 09:26:09 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:09 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140501072609.EDCFB1C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: 
r71117:3fb585ce5d69 Date: 2014-04-28 15:25 -0700 http://bitbucket.org/pypy/pypy/changeset/3fb585ce5d69/ Log: merge default into branch diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,11 +1,17 @@ ======================================= -PyPy 2.3 - XXXX TODO +PyPy 2.3 - Easier Than Ever ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. -This release also contains several bugfixes and performance improvements. +This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. 
+`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. + +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ You can download the PyPy 2.3 release here: @@ -17,72 +23,112 @@ Please consider donating more so we can finish those projects! The three projects are: -* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. -* STM (software transactional memory): a preview will be released very soon, - as soon as we fix a few bugs +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs -* NumPy: the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. -.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _ `Numpy`: http://pypy.org/numpydonate.html +.. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; +note that the latest cpython is not faster than cpython 2.7.2) due to its integrated tracing JIT compiler. 
-This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows -32, or ARM (ARMv6 or ARMv7, with VFPv3). +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. -Work on the native Windows 64 is still stalling, we would welcome a volunteer -to handle that. +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. .. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost - all pauses due to a major collection taking place. Previously, it - would pause the program (rarely) to walk all live objects, which - could take arbitrarily long if your process is using a whole lot of - RAM. Now the same work is done in steps. This should make PyPy - more responsive, e.g. in games. There are still other pauses, from - the GC and the JIT, but they should be on the order of 5 - milliseconds each. +Bugfixes +-------- -* The JIT counters for hot code were never reset, which meant that a - process running for long enough would eventually JIT-compile more - and more rarely executed code. Not only is it useless to compile - such code, but as more compiled code means more memory used, this - gives the impression of a memory leak. This has been tentatively - fixed by decreasing the counters from time to time. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. -* NumPy has been split: now PyPy only contains the core module, called - ``_numpypy``. 
The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. - You need to install NumPy separately with a virtualenv: +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. + +* Generators are faster since they now skip unecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. + +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +.. _`HippyVM`: http://www.hippyvm.com + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. We have sepearated rpython from the PyPy python + interpreter, and rpython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Support for objects with __int__ and __float__ methods + +* Tweak support of errno in cpyext (the PyPy implemenation of the capi) + + +Numpy +----- +Numpy support has been split into a builtin ``_numpy`` module and a +fork of the numpy code base adapted to pypy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; or directly: ``git clone https://bitbucket.org/pypy/numpy.git``; ``cd numpy``; ``pypy setup.py install``. -* non-inlined calls have less overhead +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the numpy test suite. 
We still do not support object or unicode ndarrays. -* Things that use ``sys.set_trace`` are now JITted (like coverage) +* speed of iteration in dot() is now within 1.5x of the numpy c + implementation (without BLAS acceleration). Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all Numpy functionality. -* JSON decoding is now very fast (JSON encoding was already very fast) +* Most of the core functionality of nditer has been implemented. -* various buffer copying methods experience speedups (like list-of-ints to - ``int[]`` buffer from cffi) +* A cffi-based ``numpy.random`` module is available as a branch in the numpy + repository, it will be merged soon after this release. -* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, - including ``os.startfile()`` on Windows and a handful of rare ones - on Posix. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. -* numpy has a rudimentary C API that cooperates with ``cpyext`` +Cheers +The PyPy Team -Cheers, -Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. +.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. 
+ Overview -------- diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -299,9 +299,13 @@ """ self._application_traceback = traceback - at specialize.memo() + +class ClearedOpErr: + def __init__(self, space): + self.operr = OperationError(space.w_None, space.w_None) + def get_cleared_operation_error(space): - return OperationError(space.w_None, space.w_None) + return space.fromcache(ClearedOpErr).operr # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -205,11 +205,14 @@ def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! """Implements sys.exc_info(). Return an OperationError instance or None.""" - frame = self.gettopframe_nohidden() + frame = self.gettopframe() while frame: if frame.last_exception is not None: - return frame.last_exception - frame = self.getnextframe_nohidden(frame) + if (not frame.hide() or + frame.last_exception is + get_cleared_operation_error(self.space)): + return frame.last_exception + frame = frame.f_backref() return None def set_sys_exc_info(self, operror): diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,10 +15,11 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap - w_code = space.builtin.call('compile', - w(source), w(filename), w(cmd), w(0), w(0)) + w_code = space.builtin.call( + 'compile', w(source), w(filename), w(cmd), w(0), w(0)) pycode = space.interp_w(eval.Code, w_code) return pycode @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ 
-55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): return _run_eval_string(source, filename, space, True) + def run_file(filename, space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -351,17 +351,17 @@ self.promote_step = promote_step def descr_new(space, w_subtype, w_start, w_stop=None, w_step=None): - start = _toint(space, w_start) + start = space.int_w(w_start) if space.is_none(w_step): # no step argument provided step = 1 promote_step = True else: - step = _toint(space, w_step) + step = space.int_w(w_step) promote_step = False if space.is_none(w_stop): # only 1 argument provided start, stop = 0, start else: - stop = _toint(space, w_stop) + stop = space.int_w(w_stop) howmany = get_len_of_range(space, start, stop, step) obj = space.allocate_instance(W_XRange, w_subtype) W_XRange.__init__(obj, space, start, howmany, step, promote_step) @@ -425,11 +425,6 @@ minint = -sys.maxint - 1 return minint if last < minint - step else last + step -def _toint(space, w_obj): - # this also supports float arguments. CPython still does, too. 
- # needs a bit more thinking in general... - return space.int_w(space.int(w_obj)) - W_XRange.typedef = TypeDef("xrange", __new__ = interp2app(W_XRange.descr_new.im_func), __repr__ = interp2app(W_XRange.descr_repr), @@ -441,6 +436,7 @@ ) W_XRange.typedef.acceptable_as_base_class = False + class W_XRangeIterator(W_Root): def __init__(self, space, current, remaining, step): self.space = space @@ -488,7 +484,10 @@ ) W_XRangeIterator.typedef.acceptable_as_base_class = False + class W_XRangeStepOneIterator(W_XRangeIterator): + _immutable_fields_ = ['stop'] + def __init__(self, space, start, stop): self.space = space self.current = start diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -311,14 +311,14 @@ def test_xrange_len(self): x = xrange(33) assert len(x) == 33 - x = xrange(33.2) - assert len(x) == 33 + exc = raises(TypeError, xrange, 33.2) + assert "integer" in str(exc.value) x = xrange(33,0,-1) assert len(x) == 33 x = xrange(33,0) assert len(x) == 0 - x = xrange(33,0.2) - assert len(x) == 0 + exc = raises(TypeError, xrange, 33, 0.2) + assert "integer" in str(exc.value) x = xrange(0,33) assert len(x) == 33 x = xrange(0,33,-1) diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert map(lambda x: x+2, [1, 2, 3, 4]) == [3, 4, 5, 6] @@ -77,6 +76,7 @@ assert result == [(2, 7), (1, 6), (None, 5), (None, 4), (None, 3), (None, 2), (None, 1)] + class AppTestZip: def test_one_list(self): assert zip([1,2,3]) == [(1,), (2,), (3,)] @@ -93,6 +93,7 @@ yield None assert zip(Foo()) == [] + class AppTestReduce: def test_None(self): raises(TypeError, reduce, lambda x, y: x+y, [1,2,3], 
None) @@ -105,6 +106,7 @@ assert reduce(lambda x, y: x-y, [10, 2, 8]) == 0 assert reduce(lambda x, y: x-y, [2, 8], 10) == 0 + class AppTestFilter: def test_None(self): assert filter(None, ['a', 'b', 1, 0, None]) == ['a', 'b', 1] @@ -125,6 +127,7 @@ return i * 10 assert filter(lambda x: x != 20, T("abcd")) == (0, 10, 30) + class AppTestXRange: def test_xrange(self): x = xrange(2, 9, 3) @@ -155,7 +158,8 @@ assert list(xrange(0, 10, A())) == [0, 5] def test_xrange_float(self): - assert list(xrange(0.1, 2.0, 1.1)) == [0, 1] + exc = raises(TypeError, xrange, 0.1, 2.0, 1.1) + assert "integer" in str(exc.value) def test_xrange_long(self): import sys @@ -218,6 +222,7 @@ assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] raises(TypeError, reversed, reversed("hello")) + class AppTestApply: def test_apply(self): def f(*args, **kw): @@ -228,6 +233,7 @@ assert apply(f, args) == (args, {}) assert apply(f, args, kw) == (args, kw) + class AppTestAllAny: """ These are copied directly and replicated from the Python 2.5 source code. 
@@ -277,6 +283,7 @@ S = [10, 20, 30] assert any([x > 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -1,6 +1,6 @@ from rpython.rlib.rstacklet import StackletThread from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef @@ -39,6 +39,7 @@ bottomframe.locals_stack_w[1] = w_callable bottomframe.locals_stack_w[2] = w_args bottomframe.locals_stack_w[3] = w_kwds + bottomframe.last_exception = get_cleared_operation_error(space) self.bottomframe = bottomframe # global_state.origin = self diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -684,3 +684,17 @@ execfile(self.translated, d) d['set_fast_mode']() d['test_various_depths']() + + def test_exc_info_doesnt_follow_continuations(self): + import sys + from _continuation import continulet + # + def f1(c1): + return sys.exc_info() + # + c1 = continulet(f1) + try: + 1 // 0 + except ZeroDivisionError: + got = c1.switch() + assert got == (None, None, None) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -521,6 +521,15 @@ /* misc helpers ----------------------------------------------------------- */ +#if defined(_MSC_VER) +long long cppyy_strtoll(const char* str) { + return _strtoi64(str, NULL, 0); +} + 
+extern "C" unsigned long long cppyy_strtoull(const char* str) { + return _strtoui64(str, NULL, 0); +} +#else long long cppyy_strtoll(const char* str) { return strtoll(str, NULL, 0); } @@ -528,6 +537,7 @@ extern "C" unsigned long long cppyy_strtoull(const char* str) { return strtoull(str, NULL, 0); } +#endif void cppyy_free(void* ptr) { free(ptr); diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -104,7 +104,6 @@ class dummy: pass self.config = dummy() self.config.translating = False - self.BUF_SIMPLE = 1 def issequence_w(self, w_obj): return True diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -3,16 +3,18 @@ class TestBuffers(BaseTestPyPyC): def test_re_match(self): - def main(): + def main(n): import re import array p = re.compile('.+') a = array.array('c', 'test' * 1000) i = 0 - while i < 5000: + while i < n: i += 1 p.match(a) # ID: match - log = self.run(main, []) + return i + log = self.run(main, [1000]) + assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('match', """ guard_not_invalidated(descr=...) @@ -21,8 +23,5 @@ guard_false(i67, descr=...) i69 = int_gt(., i65) guard_true(i69, descr=...) - guard_not_invalidated(descr=...) - i74 = getfield_raw(., descr=...) - i75 = int_lt(i74, 0) - guard_false(i75, descr=...) 
+ --TICK-- """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -20,8 +20,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i58 = getfield_gc(p18, descr=) - i59 = getfield_gc(p18, descr=) - i60 = int_lt(i58, i59) + i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) p62 = getfield_gc(ConstPtr(ptr37), descr=) diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -14,7 +14,8 @@ if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) -elif platform.cc is not None and platform.cc.startswith(('gcc', 'clang')): +elif platform.cc is not None and \ + os.path.basename(platform.cc).startswith(('gcc', 'clang')): from rpython.rtyper.tool import rffi_platform COMPILER_INFO = 'GCC ' + rffi_platform.getdefinedstring('__VERSION__', '') else: diff --git a/pypy/module/termios/interp_termios.py b/pypy/module/termios/interp_termios.py --- a/pypy/module/termios/interp_termios.py +++ b/pypy/module/termios/interp_termios.py @@ -4,7 +4,7 @@ """ from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import wrap_oserror +from pypy.interpreter.error import wrap_oserror, OperationError from rpython.rlib import rtermios import termios @@ -19,6 +19,10 @@ @unwrap_spec(when=int) def tcsetattr(space, w_fd, when, w_attributes): fd = space.c_filedescriptor_w(w_fd) + if not space.isinstance_w(w_attributes, space.w_list) or \ + space.len_w(w_attributes) != 7: + raise OperationError(space.w_TypeError, space.wrap( + "tcsetattr, arg 3: must be 7 element list")) w_iflag, w_oflag, w_cflag, w_lflag, w_ispeed, w_ospeed, w_cc = \ space.unpackiterable(w_attributes, expected_length=7) w_builtin = space.getbuiltinmodule('__builtin__') diff --git 
a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -86,7 +86,7 @@ child.expect('ok!') def test_ioctl_termios(self): - source = py.code.Source(""" + source = py.code.Source(r""" import termios import fcntl lgt = len(fcntl.ioctl(2, termios.TIOCGWINSZ, '\000'*8)) @@ -149,4 +149,7 @@ def test_error_tcsetattr(self): import termios - raises(ValueError, termios.tcsetattr, 0, 1, (1, 2)) + exc = raises(TypeError, termios.tcsetattr, 0, 1, (1, 2)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" + exc = raises(TypeError, termios.tcsetattr, 0, 1, (1, 2, 3, 4, 5, 6, 7)) + assert str(exc.value) == "tcsetattr, arg 3: must be 7 element list" diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -289,6 +289,24 @@ greenlet(f).switch() + def test_exc_info_save_restore2(self): + import sys + from greenlet import greenlet + + result = [] + + def f(): + result.append(sys.exc_info()) + + g = greenlet(f) + try: + 1 / 0 + except ZeroDivisionError: + g.switch() + + assert result == [(None, None, None)] + + def test_gr_frame(self): from greenlet import greenlet import sys diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -56,6 +56,8 @@ w = space.wrap w_d = space.newdict() for key, info in w_zipimporter.zip_file.NameToInfo.iteritems(): + if ZIPSEP != os.path.sep: + key = key.replace(ZIPSEP, os.path.sep) space.setitem(w_d, w(key), space.newtuple([ w(info.filename), w(info.compress_type), w(info.compress_size), w(info.file_size), w(info.file_offset), w(info.dostime), diff --git a/pypy/module/zipimport/test/test_undocumented.py 
b/pypy/module/zipimport/test/test_undocumented.py --- a/pypy/module/zipimport/test/test_undocumented.py +++ b/pypy/module/zipimport/test/test_undocumented.py @@ -135,8 +135,7 @@ importer = zipimport.zipimporter(os.path.join(zip_path, '_pkg')) assert zip_path in zipimport._zip_directory_cache file_set = set(zipimport._zip_directory_cache[zip_path].iterkeys()) - compare_set = set(path.replace(os.path.sep, '/') + '.py' - for path in self.created_paths) + compare_set = set(path + '.py' for path in self.created_paths) assert file_set == compare_set finally: self.cleanup_zipfile(self.created_paths) diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -157,7 +157,6 @@ import sys, os self.writefile("uuu.py", "def f(x): return x") mod = __import__('uuu', globals(), locals(), []) - print mod assert mod.f(3) == 3 expected = { '__doc__' : None, @@ -334,7 +333,9 @@ self.writefile("directory/package/__init__.py", "") importer = zipimport.zipimporter(self.zipfile + "/directory") l = [i for i in zipimport._zip_directory_cache] - assert len(l) + assert len(l) == 1 + k = zipimport._zip_directory_cache[l[0]].keys() + assert k[0] == os.path.sep.join(['directory','package','__init__.py']) def test_path_hooks(self): import sys diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -3,13 +3,12 @@ """ import operator -from rpython.rlib.buffer import Buffer, StringBuffer, SubBuffer +from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from rpython.rlib.objectmodel import compute_hash -from rpython.rlib.rstring import 
StringBuilder class W_Buffer(W_Root): diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -6,7 +6,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, pwd, urllib +import os, sys, urllib from twisted.internet import reactor, defer from twisted.python import log @@ -34,6 +34,13 @@ 'build-pypy-c-jit-linux-armel', ] +def get_user(): + if sys.platform == 'win32': + return os.environ['USERNAME'] + else: + import pwd + return pwd.getpwuid(os.getuid())[0] + def main(): #XXX: handle release tags #XXX: handle validity checks @@ -49,7 +56,7 @@ print 'Forcing', builder, '...' 
url = "http://buildbot.pypy.org/builders/" + builder + "/force" args = [ - ('username', pwd.getpwuid(os.getuid())[0]), + ('username', get_user()), ('revision', ''), ('submit', 'Force Build'), ('branch', branch), diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1,5 +1,5 @@ from __future__ import with_statement -import new +import types import py from contextlib import contextmanager @@ -943,7 +943,7 @@ def monkey_patch_code(self, code, stacksize, flags, codestring, names, varnames): c = code - return new.code(c.co_argcount, c.co_nlocals, stacksize, flags, + return types.CodeType(c.co_argcount, c.co_nlocals, stacksize, flags, codestring, c.co_consts, names, varnames, c.co_filename, c.co_name, c.co_firstlineno, c.co_lnotab) @@ -964,7 +964,7 @@ # this code is generated by pypy-c when compiling above f pypy_code = 't\x00\x00\x83\x00\x00}\x00\x00|\x00\x00\xc9\x01\x00\xca\x00\x00S' new_c = self.monkey_patch_code(f.func_code, 3, 3, pypy_code, ('X', 'x', 'm'), ('x',)) - f2 = new.function(new_c, locals(), 'f') + f2 = types.FunctionType(new_c, locals(), 'f') graph = self.codetest(f2) all_ops = self.all_operations(graph) @@ -984,7 +984,7 @@ pypy_code = 'd\x01\x00\xcb\x00\x00D]\x0c\x00}\x00\x00|\x00\x00^\x02\x00q\x07\x00S' new_c = self.monkey_patch_code(f.func_code, 3, 67, pypy_code, (), ('i',)) - f2 = new.function(new_c, locals(), 'f') + f2 = types.FunctionType(new_c, locals(), 'f') graph = self.codetest(f2) all_ops = self.all_operations(graph) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5884,6 +5884,25 @@ """ self.optimize_loop(ops, expected) + def test_bug_unroll_with_immutables(self): + ops = """ + [p0] + i2 = 
getfield_gc_pure(p0, descr=immut_intval) + p1 = new_with_vtable(ConstClass(intobj_immut_vtable)) + setfield_gc(p1, 1242, descr=immut_intval) + jump(p1) + """ + preamble = """ + [p0] + i2 = getfield_gc_pure(p0, descr=immut_intval) + jump() + """ + expected = """ + [] + jump() + """ + self.optimize_loop(ops, expected, preamble) + def test_immutable_constantfold_recursive(self): ops = """ [] diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -180,10 +180,11 @@ self.optimizer.clear_newoperations() for i in range(len(original_jump_args)): + srcbox = jump_args[i] if values[i].is_virtual(): - values[i].force_box(self.optimizer) - if original_jump_args[i] is not jump_args[i]: - op = ResOperation(rop.SAME_AS, [jump_args[i]], original_jump_args[i]) + srcbox = values[i].force_box(self.optimizer) + if original_jump_args[i] is not srcbox: + op = ResOperation(rop.SAME_AS, [srcbox], original_jump_args[i]) self.optimizer.emit_operation(op) inputarg_setup_ops = self.optimizer.get_newoperations() diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -107,7 +107,7 @@ self.matcher = matcher self.lineno = 0 self.columnno = 0 - + def find_next_token(self): while 1: self.state = 0 @@ -126,8 +126,8 @@ i = ~i stop = self.last_matched_index + 1 assert stop >= 0 - if start == stop: - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + if start == stop: + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise deterministic.LexerError(self.text, self.state, source_pos) source = self.text[start:stop] @@ -147,7 +147,7 @@ else: raise StopIteration return result - source_pos = SourcePos(i - 1, self.lineno, self.columnno) + source_pos = self.token_position_class(i - 1, self.lineno, self.columnno) raise 
deterministic.LexerError(self.text, self.state, source_pos) def adjust_position(self, token): @@ -158,7 +158,7 @@ self.columnno += len(token) else: self.columnno = token.rfind("\n") - + # def inner_loop(self, i): # while i < len(self.text): # char = self.text[i] @@ -186,10 +186,15 @@ class LexingDFARunner(AbstractLexingDFARunner): def __init__(self, matcher, automaton, text, ignore, eof=False, token_class=None): - if token_class is None: + + if not token_class: self.token_class = Token + self.token_position_class = SourcePos + else: self.token_class = token_class + self.token_position_class = token_class.source_position_class + AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -198,8 +203,10 @@ def make_token(self, index, state, text, eof=False): assert (eof and state == -1) or 0 <= state < len(self.automaton.names) - source_pos = SourcePos(index, self.lineno, self.columnno) + + source_pos = self.token_position_class(index, self.lineno, self.columnno) if eof: return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], text, source_pos) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1146,9 +1146,9 @@ address_to_fill=None): # port_or_service is a string, not an int (but try str(port_number)). 
assert port_or_service is None or isinstance(port_or_service, str) - if _c._MACOSX: - if port_or_service is None or port_or_service == '0': - port_or_service = '00' + if _c._MACOSX and flags & AI_NUMERICSERV and \ + (port_or_service is None or port_or_service == '0'): + port_or_service = '00' hints = lltype.malloc(_c.addrinfo, flavor='raw', zero=True) rffi.setintfield(hints, 'c_ai_family', family) rffi.setintfield(hints, 'c_ai_socktype', socktype) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -40,7 +40,7 @@ from rpython.rlib import rposix from rpython.rlib.rstring import StringBuilder -from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC +from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND O_BINARY = getattr(os, "O_BINARY", 0) # (basemode, plus) @@ -48,8 +48,8 @@ ('r', True): O_RDWR, ('w', False): O_WRONLY | O_CREAT | O_TRUNC, ('w', True): O_RDWR | O_CREAT | O_TRUNC, - ('a', False): O_WRONLY | O_CREAT, - ('a', True): O_RDWR | O_CREAT, + ('a', False): O_WRONLY | O_CREAT | O_APPEND, + ('a', True): O_RDWR | O_CREAT | O_APPEND, } class MyNotImplementedError(Exception): diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1104,6 +1104,21 @@ finally: signal(SIGALRM, SIG_DFL) + def test_append_mode(self): + tfn = str(udir.join('streamio-append-mode')) + fo = streamio.open_file_as_stream # shorthand + x = fo(tfn, 'w') + x.write('abc123') + x.close() + + x = fo(tfn, 'a') + x.seek(0, 0) + x.write('456') + x.close() + x = fo(tfn, 'r') + assert x.read() == 'abc123456' + x.close() + # Speed test diff --git a/rpython/tool/identity_dict.py b/rpython/tool/identity_dict.py --- a/rpython/tool/identity_dict.py +++ b/rpython/tool/identity_dict.py @@ -3,15 +3,15 @@ except ImportError: idict = None -from UserDict import DictMixin +from collections import 
MutableMapping -class IdentityDictPurePython(object, DictMixin): +class IdentityDictPurePython(MutableMapping): __slots__ = "_dict _keys".split() def __init__(self): self._dict = {} - self._keys = {} # id(obj) -> obj + self._keys = {} # id(obj) -> obj def __getitem__(self, arg): return self._dict[id(arg)] @@ -24,8 +24,11 @@ del self._keys[id(arg)] del self._dict[id(arg)] - def keys(self): - return self._keys.values() + def __iter__(self): + return self._keys.itervalues() + + def __len__(self): + return len(self._keys) def __contains__(self, arg): return id(arg) in self._dict @@ -37,8 +40,7 @@ return d -class IdentityDictPyPy(object, DictMixin): - __slots__ = ["_dict"] +class IdentityDictPyPy(MutableMapping): def __init__(self): self._dict = idict() @@ -52,8 +54,11 @@ def __delitem__(self, arg): del self._dict[arg] - def keys(self): - return self._dict.keys() + def __iter__(self): + return iter(self._dict.keys()) + + def __len__(self): + return len(self._dict) def __contains__(self, arg): return arg in self._dict @@ -64,8 +69,10 @@ assert len(d) == len(self) return d + def __nonzero__(self): + return bool(self._dict) + if idict is None: identity_dict = IdentityDictPurePython else: identity_dict = IdentityDictPyPy - diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,7 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. 
-import sys, os, inspect, new +import sys, os, inspect, types import py def render_docstr(func, indent_str='', closing_str=''): @@ -127,7 +127,7 @@ for name in names: if name not in kwargs: kwargs[name] = getattr(fromcode, name) - return new.code( + return types.CodeType( kwargs['co_argcount'], kwargs['co_nlocals'], kwargs['co_stacksize'], @@ -218,9 +218,8 @@ """Make a renamed copy of a function.""" if globals is None: globals = func.func_globals - f = new.function(func.func_code, globals, - newname, func.func_defaults, - func.func_closure) + f = types.FunctionType(func.func_code, globals, newname, + func.func_defaults, func.func_closure) if func.func_dict: f.func_dict = {} f.func_dict.update(func.func_dict) From noreply at buildbot.pypy.org Thu May 1 09:26:11 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:11 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: clean and refactoring of methods and class creation Message-ID: <20140501072611.390221C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71118:af753187637f Date: 2014-04-29 11:41 -0700 http://bitbucket.org/pypy/pypy/changeset/af753187637f/ Log: clean and refactoring of methods and class creation diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -16,7 +16,7 @@ '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', - 'CPPInstance' : 'interp_cppyy.W_CPPInstance', + 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', } @@ -25,7 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', - 'Template' : 'pythonify.CppyyTemplateType', + 'Template' : 'pythonify.CPPTemplate', } 
def __init__(self, space, *args): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -155,8 +155,7 @@ the memory_regulator.""" _attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required', - 'args_expected', 'converters', 'executor', '_funcaddr', 'cif_descr', - 'uses_local'] + 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True def __init__(self, space, containing_scope, method_index, arg_defs, args_required): @@ -166,7 +165,6 @@ self.cppmethod = capi.c_get_method(self.space, self.scope, method_index) self.arg_defs = arg_defs self.args_required = args_required - self.args_expected = len(arg_defs) # Setup of the method dispatch's innards is done lazily, i.e. only when # the method is actually used. @@ -183,6 +181,12 @@ loc_idx = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, call_local), idx*stride) return rffi.cast(rffi.VOIDP, loc_idx) + def call_w(self, w_cppinstance, args_w): + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + cppthis = cppinstance.get_cppthis(self.scope) + return self.call(cppthis, args_w) + @jit.unroll_safe def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -277,7 +281,7 @@ funcaddr = methgetter(rffi.cast(capi.C_OBJECT, cppthis)) self._funcaddr = rffi.cast(rffi.VOIDP, funcaddr) - nargs = self.args_expected + 1 # +1: cppthis + nargs = len(self.arg_defs) + 1 # +1: cppthis # memory block for CIF description (note: not tracked as the life # time of methods is normally the duration of the application) @@ -335,7 +339,7 @@ # extra cif_descr.abi = clibffi.FFI_DEFAULT_ABI - cif_descr.nargs = self.args_expected + 1 # +1: cppthis + cif_descr.nargs = len(self.arg_defs) + 1 # +1: cppthis res = jit_libffi.jit_ffi_prep_cif(cif_descr) if res != clibffi.FFI_OK: @@ -405,21 +409,21 @@ class 
CPPFunction(CPPMethod): - """Global (namespaced) function dispatcher. For now, the base class has - all the needed functionality, by allowing the C++ this pointer to be null - in the call. An optimization is expected there, however.""" + """Global (namespaced) function dispatcher.""" _immutable_ = True + def call_w(self, w_cppinstance, args_w): + return CPPMethod.call(self, capi.C_NULL_OBJECT, args_w) + def __repr__(self): return "CPPFunction: %s" % self.signature() class CPPTemplatedCall(CPPMethod): - """Method dispatcher that first needs to resolve the template instance. - Note that the derivation is from object: the CPPMethod is a data member.""" + """Method dispatcher that first resolves the template instance.""" - _attrs_ = ['space', 'templ_args', 'method'] + _attrs_ = ['space', 'templ_args'] _immutable_ = True def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): @@ -456,22 +460,17 @@ _immutable_ = True - def call(self, cppthis, args_w): + def call_w(self, w_cppinstance, args_w): # TODO: these casts are very, very un-pretty; need to find a way of # re-using CPPMethod's features w/o these roundabouts vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - cppinstance = None - try: - cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) - use_args_w = args_w[1:] - except (OperationError, TypeError), e: - use_args_w = args_w - w_result = CPPMethod.call(self, vscope, use_args_w) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + w_result = CPPMethod.call(self, vscope, args_w) newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - if cppinstance: + if cppinstance is not None: cppinstance._rawobject = newthis memory_regulator.register(cppinstance) - return args_w[0] + return w_cppinstance return wrap_cppobject(self.space, newthis, self.scope, do_cast=False, python_owns=True, fresh=True) @@ -508,6 +507,7 @@ def __init__(self, space, containing_scope, 
functions): self.space = space self.scope = containing_scope + assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -520,14 +520,6 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - if cppinstance is not None: - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - else: - cppthis = capi.C_NULL_OBJECT - assert lltype.typeOf(cppthis) == capi.C_OBJECT - # The following code tries out each of the functions in order. If # argument conversion fails (or simply if the number of arguments do # not match), that will lead to an exception, The JIT will snip out @@ -542,7 +534,7 @@ for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: - return cppyyfunc.call(cppthis, args_w) + return cppyyfunc.call_w(w_cppinstance, args_w) except Exception: pass @@ -553,7 +545,7 @@ for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: - return cppyyfunc.call(cppthis, args_w) + return cppyyfunc.call_w(w_cppinstance, args_w) except OperationError, e: # special case if there's just one function, to prevent clogging the error message if len(self.functions) == 1: @@ -906,7 +898,7 @@ def construct(self): if self.default_constructor is not None: - return self.default_constructor.call(capi.C_NULL_OBJECT, []) + return self.default_constructor.call(capi.C_NULL_OBJECT, None, []) raise self.missing_attribute_error("default_constructor") def find_overload(self, name): @@ -1046,6 +1038,16 @@ raise return None + def instance__init__(self, args_w): + try: + constructor_overload = self.cppclass.get_overload(self.cppclass.name) + constructor_overload.call(self, args_w) + except OperationError, e: + if not e.match(self.space, self.space.w_AttributeError): + raise + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot instantiate abstract class '%s'" % 
self.cppclass.name)) + def instance__eq__(self, w_other): # special case: if other is None, compare pointer-style if self.space.is_w(w_other, self.space.w_None): @@ -1128,6 +1130,7 @@ 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), _python_owns = GetSetProperty(W_CPPInstance.fget_python_owns, W_CPPInstance.fset_python_owns), + __init__ = interp2app(W_CPPInstance.instance__init__), __eq__ = interp2app(W_CPPInstance.instance__eq__), __ne__ = interp2app(W_CPPInstance.instance__ne__), __nonzero__ = interp2app(W_CPPInstance.instance__nonzero__), diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -7,7 +7,7 @@ # with info from multiple dictionaries and do not need to bother with meta # classes for inheritance. Both are python classes, though, and refactoring # may be in order at some point. -class CppyyScopeMeta(type): +class CPPScope(type): def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self @@ -15,16 +15,16 @@ raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) -class CppyyNamespaceMeta(CppyyScopeMeta): +class CPPNamespace(CPPScope): def __dir__(cls): return cls._cpp_proxy.__dir__() -class CppyyClassMeta(CppyyScopeMeta): +class CPPClass(CPPScope): pass -# class CppyyClass defined in _init_pythonify() +# class CPPInstance defined in _init_pythonify() -class CppyyTemplateType(object): +class CPPTemplate(object): def __init__(self, name, scope=None): self._name = name if scope is None: @@ -91,7 +91,7 @@ # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) - metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + metans = type(CPPNamespace)(namespace_name+'_meta', (CPPNamespace,), {}) if cppns: d = {"_cpp_proxy" : cppns} @@ -137,21 +137,14 @@ 
break return tuple(bases) -def make_new(class_name, cppclass): - try: - constructor_overload = cppclass.get_overload(cppclass.type_name) - except AttributeError: - msg = "cannot instantiate abstract class '%s'" % class_name - def __new__(cls, *args): - raise TypeError(msg) - else: - def __new__(cls, *args): - # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) - if not instance.__class__ is cls: - instance.__class__ = cls # happens for derived class - return instance +def make_new(class_name): + def __new__(cls, *args): + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -159,7 +152,7 @@ # get a list of base classes for class creation bases = [get_pycppclass(base) for base in cppclass.get_base_names()] if not bases: - bases = [CppyyClass,] + bases = [CPPInstance,] else: # it's technically possible that the required class now has been built # if one of the base classes uses it in e.g. 
a function interface @@ -170,7 +163,7 @@ # create a meta class to allow properties (for static data write access) metabases = [type(base) for base in bases] - metacpp = type(CppyyClassMeta)(class_name+'_meta', _drop_cycles(metabases), {}) + metacpp = type(CPPClass)(class_name+'_meta', _drop_cycles(metabases), {}) # create the python-side C++ class representation def dispatch(self, name, signature): @@ -178,7 +171,7 @@ return types.MethodType(make_method(name, cppol), self, type(self)) d = {"_cpp_proxy" : cppclass, "__dispatch__" : dispatch, - "__new__" : make_new(class_name, cppclass), + "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) @@ -214,7 +207,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(template_name, scope) + return CPPTemplate(template_name, scope) def get_pycppitem(scope, name): @@ -426,15 +419,12 @@ # at pypy-c startup, rather than on the "import cppyy" statement import cppyy - # top-level classes - global CppyyClass - class CppyyClass(cppyy.CPPInstance): - __metaclass__ = CppyyClassMeta - - def __init__(self, *args, **kwds): - # self is only a placeholder; now create the actual C++ object - args = (self,) + args - self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) + # root of all proxy classes: CPPInstance in pythonify exists to combine the + # CPPClass meta class with the interp-level CPPInstanceBase + global CPPInstance + class CPPInstance(cppyy.CPPInstanceBase): + __metaclass__ = CPPClass + pass # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/cppyy/test/fragile.h --- a/pypy/module/cppyy/test/fragile.h +++ b/pypy/module/cppyy/test/fragile.h @@ -112,4 +112,9 @@ enum E2 { kTwice=12 }; }; +class O { +public: + virtual int abstract() = 0; +}; + } // namespace fragile diff --git a/pypy/module/cppyy/test/fragile_LinkDef.h 
b/pypy/module/cppyy/test/fragile_LinkDef.h --- a/pypy/module/cppyy/test/fragile_LinkDef.h +++ b/pypy/module/cppyy/test/fragile_LinkDef.h @@ -23,6 +23,7 @@ #pragma link C++ class fragile::L; #pragma link C++ class fragile::M; #pragma link C++ class fragile::N; +#pragma link C++ class fragile::O; #pragma link C++ class fragile::nested1::A; #pragma link C++ class fragile::nested1::nested2::A; #pragma link C++ class fragile::nested1::nested2::nested3::A; diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -202,6 +202,12 @@ f = fragile.fglobal assert f.__doc__ == "void fragile::fglobal(int, double, char)" + try: + o = fragile.O() # raises TypeError + assert 0 + except TypeError, e: + assert "cannot instantiate abstract class 'O'" in str(e) + def test11_dir(self): """Test __dir__ method""" diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -338,8 +338,13 @@ import cppyy example01 = cppyy.gbl.example01 + assert example01.getCount() == 0 + o = example01() assert type(o) == example01 + assert example01.getCount() == 1 + o.destruct() + assert example01.getCount() == 0 class MyClass1(example01): def myfunc(self): @@ -348,7 +353,10 @@ o = MyClass1() assert type(o) == MyClass1 assert isinstance(o, example01) + assert example01.getCount() == 1 assert o.myfunc() == 1 + o.destruct() + assert example01.getCount() == 0 class MyClass2(example01): def __init__(self, what): @@ -357,7 +365,11 @@ o = MyClass2('hi') assert type(o) == MyClass2 + assert example01.getCount() == 1 assert o.what == 'hi' + o.destruct() + + assert example01.getCount() == 0 class AppTestPYTHONIFY_UI: From noreply at buildbot.pypy.org Thu May 1 09:26:12 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:12 +0200 (CEST) 
Subject: [pypy-commit] pypy reflex-support: refactoring/cleanup Message-ID: <20140501072612.644411C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71119:0eff3fae1c87 Date: 2014-04-30 10:54 -0700 http://bitbucket.org/pypy/pypy/changeset/0eff3fae1c87/ Log: refactoring/cleanup diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -158,9 +158,9 @@ 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True - def __init__(self, space, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, declaring_scope, method_index, arg_defs, args_required): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.index = method_index self.cppmethod = capi.c_get_method(self.space, self.scope, method_index) self.arg_defs = arg_defs @@ -174,6 +174,12 @@ self._funcaddr = lltype.nullptr(rffi.VOIDP.TO) self.uses_local = False + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + return cppinstance.get_cppthis(declaring_scope) + def _address_from_local_buffer(self, call_local, idx): if not call_local: return call_local @@ -181,12 +187,6 @@ loc_idx = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, call_local), idx*stride) return rffi.cast(rffi.VOIDP, loc_idx) - def call_w(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - return self.call(cppthis, args_w) - @jit.unroll_safe def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -413,8 +413,9 @@ _immutable_ = True - def call_w(self, w_cppinstance, args_w): - return CPPMethod.call(self, capi.C_NULL_OBJECT, args_w) 
+ @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return capi.C_NULL_OBJECT def __repr__(self): return "CPPFunction: %s" % self.signature() @@ -426,11 +427,11 @@ _attrs_ = ['space', 'templ_args'] _immutable_ = True - def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required): self.space = space self.templ_args = templ_args # TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here - CPPMethod.__init__(self, space, containing_scope, method_index, arg_defs, args_required) + CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required) def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -460,19 +461,15 @@ _immutable_ = True - def call_w(self, w_cppinstance, args_w): - # TODO: these casts are very, very un-pretty; need to find a way of - # re-using CPPMethod's features w/o these roundabouts - vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - w_result = CPPMethod.call(self, vscope, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - if cppinstance is not None: - cppinstance._rawobject = newthis - memory_regulator.register(cppinstance) - return w_cppinstance - return wrap_cppobject(self.space, newthis, self.scope, - do_cast=False, python_owns=True, fresh=True) + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return rffi.cast(capi.C_OBJECT, declaring_scope.handle) + + def call(self, cppthis, args_w): + # Note: this does not return a wrapped instance, just a pointer to the + # new instance; the overload must still wrap it before returning. Also, + # cppthis is declaring_scope.handle (as per unpack_cppthis(), above). 
+ return CPPMethod.call(self, cppthis, args_w) def __repr__(self): return "CPPConstructor: %s" % self.signature() @@ -501,12 +498,12 @@ collection of (possibly) overloaded methods or functions. It calls these in order and deals with error handling and reporting.""" - _attrs_ = ['space', 'scope', 'functions'] - _immutable_fields_ = ['scope', 'functions[*]'] + _attrs_ = ['space', 'functions'] + _immutable_fields_ = ['functions[*]'] - def __init__(self, space, containing_scope, functions): + def __init__(self, space, declaring_scope, functions): self.space = space - self.scope = containing_scope + self.scope = declaring_scope assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -520,6 +517,12 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): + # instance handling is specific to the function type only, so take it out + # of the loop over function overloads + cppthis = self.functions[0].unpack_cppthis( + self.space, w_cppinstance, self.functions[0].scope) + assert lltype.typeOf(cppthis) == capi.C_OBJECT + # The following code tries out each of the functions in order. 
If # argument conversion fails (or simply if the number of arguments do # not match), that will lead to an exception, The JIT will snip out @@ -534,7 +537,7 @@ for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: - return cppyyfunc.call_w(w_cppinstance, args_w) + return cppyyfunc.call(cppthis, args_w) except Exception: pass @@ -545,7 +548,7 @@ for i in range(len(self.functions)): cppyyfunc = self.functions[i] try: - return cppyyfunc.call_w(w_cppinstance, args_w) + return cppyyfunc.call(cppthis, args_w) except OperationError, e: # special case if there's just one function, to prevent clogging the error message if len(self.functions) == 1: @@ -577,6 +580,39 @@ ) +class W_CPPConstructorOverload(W_CPPOverload): + @jit.elidable_promote() + def is_static(self): + return self.space.w_False + + @jit.elidable_promote() + def unpack_cppthis(self, w_cppinstance): + return rffi.cast(capi.C_OBJECT, self.scope.handle) + + @jit.unroll_safe + @unwrap_spec(args_w='args_w') + def call(self, w_cppinstance, args_w): + w_result = W_CPPOverload.call(self, w_cppinstance, args_w) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if cppinstance is not None: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return w_cppinstance + return wrap_cppobject(self.space, newthis, self.functions[0].scope, + do_cast=False, python_owns=True, fresh=True) + + def __repr__(self): + return "W_CPPConstructorOverload(%s)" % [f.signature() for f in self.functions] + +W_CPPConstructorOverload.typedef = TypeDef( + 'CPPConstructorOverload', + is_static = interp2app(W_CPPConstructorOverload.is_static), + call = interp2app(W_CPPConstructorOverload.call), + signature = interp2app(W_CPPOverload.signature), +) + + class W_CPPBoundMethod(W_Root): _attrs_ = ['cppthis', 'method'] @@ -597,9 +633,9 @@ _attrs_ = ['space', 'scope', 'converter', 'offset'] _immutable_fields = 
['scope', 'converter', 'offset'] - def __init__(self, space, containing_scope, type_name, offset): + def __init__(self, space, declaring_scope, type_name, offset): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.converter = converter.get_converter(self.space, type_name, '') self.offset = offset @@ -709,7 +745,10 @@ # create the overload methods from the method sets for pyname, methods in methods_temp.iteritems(): CPPMethodSort(methods).sort() - overload = W_CPPOverload(self.space, self, methods[:]) + if pyname == self.name: + overload = W_CPPConstructorOverload(self.space, self, methods[:]) + else: + overload = W_CPPOverload(self.space, self, methods[:]) self.methods[pyname] = overload def full_name(self): @@ -849,14 +888,13 @@ class W_CPPClass(W_CPPScope): - _attrs_ = ['space', 'default_constructor', 'name', 'handle', 'methods', 'datamembers'] - _immutable_fields_ = ['kind', 'default_constructor', 'methods[*]', 'datamembers[*]'] + _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] + _immutable_fields_ = ['kind', 'constructor', 'methods[*]', 'datamembers[*]'] kind = "class" def __init__(self, space, name, opaque_handle): W_CPPScope.__init__(self, space, name, opaque_handle) - self.default_constructor = None def _make_cppfunction(self, pyname, index): num_args = capi.c_method_num_args(self.space, self, index) @@ -868,8 +906,6 @@ arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.space, self, index): cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required) - if args_required == 0: - self.default_constructor = cppfunction elif capi.c_method_is_template(self.space, self, index): templ_args = capi.c_template_args(self.space, self, index) cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required) @@ -897,9 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - if self.default_constructor is not None: - return 
self.default_constructor.call(capi.C_NULL_OBJECT, None, []) - raise self.missing_attribute_error("default_constructor") + self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) From noreply at buildbot.pypy.org Thu May 1 09:26:13 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:13 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: updates for CINT backend after refactoring Message-ID: <20140501072613.908EB1C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71120:544dc4a17124 Date: 2014-04-30 13:51 -0700 http://bitbucket.org/pypy/pypy/changeset/544dc4a17124/ Log: updates for CINT backend after refactoring diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -127,19 +127,18 @@ argc = len(args_w) try: - # Note: argcount is +1 for the class (== w_self) - if argc < 5 or 6 < argc: + if argc < 4 or 5 < argc: raise TypeError("wrong number of arguments") - # second argument must be a name - funcname = space.str_w(args_w[1]) + # first argument must be a name + funcname = space.str_w(args_w[0]) # last (optional) argument is number of parameters npar = 0 - if argc == 6: npar = space.int_w(args_w[5]) + if argc == 5: npar = space.int_w(args_w[4]) - # third argument must be a callable python object - w_callable = args_w[2] + # second argument must be a callable python object + w_callable = args_w[1] if not space.is_true(space.callable(w_callable)): raise TypeError("2nd argument is not a valid python callable") @@ -159,17 +158,21 @@ # so far, so good; leaves on issue: CINT is expecting a wrapper, but # we need the overload that takes a function pointer, which is not in # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, - space.float_w(args_w[3]), space.float_w(args_w[4]), npar) - - from 
pypy.module.cppyy import interp_cppyy - w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, - do_cast=False, python_owns=True, fresh=True) + newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, + space.float_w(args_w[2]), space.float_w(args_w[3]), npar) + + # w_self is a null-ptr bound as TF1 + from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator + cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) + cppself._rawobject = newinst + memory_regulator.register(cppself) # tie all the life times to the TF1 instance - space.setattr(w_instance, space.wrap('_callback'), w_callback) + space.setattr(w_self, space.wrap('_callback'), w_callback) - return w_instance + # by definition for __init__ + return None + except (OperationError, TypeError, IndexError), e: newargs_w = args_w[1:] # drop class @@ -312,7 +315,7 @@ # location w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.buffer_w(w_address) + buf = space.getarg_w('s*', w_address) from pypy.module._rawffi import buffer assert isinstance(buf, buffer.RawFFIBuffer) address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) @@ -395,7 +398,7 @@ _method_alias(space, w_pycppclass, "__len__", "GetSize") elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -933,7 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - self.get_overload(self.name).call(None, []) + return self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) diff --git a/pypy/module/cppyy/test/test_cint.py 
b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -435,14 +435,16 @@ class AppTestCINTFUNCTION: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + _pypytest_leaks = None # TODO: figure out the false positives # test the function callbacks; this does not work with Reflex, as it can # not generate functions on the fly (it might with cffi?) + @py.test.mark.dont_track_allocations("TODO: understand; initialization left-over?") def test01_global_function_callback(self): """Test callback of a python global function""" - import cppyy + import cppyy, gc TF1 = cppyy.gbl.TF1 def identity(x): @@ -460,11 +462,12 @@ assert f.Eval(0.5) == 0.5 del f # force here, to prevent leak-check complaints + gc.collect() def test02_callable_object_callback(self): """Test callback of a python callable object""" - import cppyy + import cppyy, gc TF1 = cppyy.gbl.TF1 class Linear: @@ -478,13 +481,14 @@ assert f.Eval(1.3) == 7.6 del f # force here, to prevent leak-check complaints + gc.collect() def test03_fit_with_python_gaussian(self): """Test fitting with a python global function""" # note: this function is dread-fully slow when running testing un-translated - import cppyy, math + import cppyy, gc, math TF1, TH1F = cppyy.gbl.TF1, cppyy.gbl.TH1F def pygaus(x, par): @@ -515,6 +519,7 @@ assert round(result[2] - 1., 1) == 0 # s.d. 
del f # force here, to prevent leak-check complaints + gc.collect() class AppTestSURPLUS: From noreply at buildbot.pypy.org Thu May 1 09:26:14 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:14 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: save indices separately to make this slightly more maintainable Message-ID: <20140501072614.B32DE1C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71121:3d8b31186b6f Date: 2014-04-30 17:07 -0700 http://bitbucket.org/pypy/pypy/changeset/3d8b31186b6f/ Log: save indices separately to make this slightly more maintainable diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -4,12 +4,14 @@ #include #include #include +#include #include #include #include #include + // add example01.cxx code int globalAddOneToInt(int a); @@ -27,143 +29,162 @@ typedef std::map Handles_t; static Handles_t s_handles; +enum EMethodType { kNormal=0, kConstructor=1, kStatic=2 }; + struct Cppyy_PseudoMethodInfo { Cppyy_PseudoMethodInfo(const std::string& name, const std::vector& argtypes, - const std::string& returntype) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} + const std::string& returntype, + EMethodType mtype = kNormal) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; std::string m_returntype; + EMethodType m_type; }; struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} - Cppyy_PseudoClassInfo(const std::vector& methods) : - m_methods(methods ) {} + Cppyy_PseudoClassInfo(const std::vector& methods, + long method_offset) : + m_methods(methods), m_method_offset(method_offset) {} std::vector m_methods; + long m_method_offset; }; typedef std::map Scopes_t; static Scopes_t s_scopes; -static int example01_last_static_method = 0; -static int 
example01_last_constructor = 0; -static int payload_methods_offset = 0; +static std::map s_methods; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- static long s_scope_id = 0; + static long s_method_id = 0; { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // ( 0) static double staticAddToDouble(double a) + // static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double", kStatic)); + s_methods["static_example01::staticAddToDouble_double"] = s_method_id++; - // ( 1) static int staticAddOneToInt(int a) - // ( 2) static int staticAddOneToInt(int a, int b) + // static int staticAddOneToInt(int a) + // static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int_int"] = s_method_id++; - // ( 3) static int staticAtoi(const char* str) + // static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int", kStatic)); + s_methods["static_example01::staticAtoi_cchar*"] = s_method_id++; - // ( 4) static char* staticStrcpy(const char* strin) - 
methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // static char* staticStrcpy(const char* strin) + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*", kStatic)); + s_methods["static_example01::staticStrcpy_cchar*"] = s_method_id++; - // ( 5) static void staticSetPayload(payload* p, double d) - // ( 6) static payload* staticCyclePayload(payload* p, double d) - // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + // static void staticSetPayload(payload* p, double d) + // static payload* staticCyclePayload(payload* p, double d) + // static payload staticCopyCyclePayload(payload* p, double d) argtypes.clear(); argtypes.push_back("payload*"); argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void", kStatic)); + s_methods["static_example01::staticSetPayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*", kStatic)); + s_methods["static_example01::staticCyclePayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload", kStatic)); + s_methods["static_example01::staticCopyCyclePayload_payload*_double"] = s_method_id++; - // ( 8) static int getCount() - // ( 9) static void setCount(int) + // static int getCount() + // static void setCount(int) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int", kStatic)); + s_methods["static_example01::getCount"] = s_method_id++; argtypes.push_back("int"); - 
methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void", kStatic)); + s_methods["static_example01::setCount_int"] = s_method_id++; - // cut-off is used in cppyy_is_static - example01_last_static_method = methods.size(); + // example01() + // example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01"] = s_method_id++; + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01_int"] = s_method_id++; - // (10) example01() - // (11) example01(int a) - argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - - // cut-off is used in cppyy_is_constructor - example01_last_constructor = methods.size(); - - // (12) int addDataToInt(int a) + // int addDataToInt(int a) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + s_methods["example01::addDataToInt_int"] = s_method_id++; - // (13) int addDataToIntConstRef(const int& a) + // int addDataToIntConstRef(const int& a) argtypes.clear(); argtypes.push_back("const int&"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + s_methods["example01::addDataToIntConstRef_cint&"] = s_method_id++; - // (14) int overloadedAddDataToInt(int a, int b) + // int overloadedAddDataToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int"] = s_method_id++; - // (15) int overloadedAddDataToInt(int a) - // (16) int 
overloadedAddDataToInt(int a, int b, int c) + // int overloadedAddDataToInt(int a) + // int overloadedAddDataToInt(int a, int b, int c) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); - + s_methods["example01::overloadedAddDataToInt_int"] = s_method_id++; argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int_int"] = s_method_id++; - // (17) double addDataToDouble(double a) + // double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + s_methods["example01::addDataToDouble_double"] = s_method_id++; - // (18) int addDataToAtoi(const char* str) - // (19) char* addToStringValue(const char* str) + // int addDataToAtoi(const char* str) + // char* addToStringValue(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + s_methods["example01::addDataToAtoi_cchar*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + s_methods["example01::addToStringValue_cchar*"] = s_method_id++; - // (20) void setPayload(payload* p) - // (21) payload* cyclePayload(payload* p) - // (22) payload copyCyclePayload(payload* p) + // void setPayload(payload* p) + // payload* cyclePayload(payload* p) + // payload copyCyclePayload(payload* p) argtypes.clear(); argtypes.push_back("payload*"); methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + s_methods["example01::setPayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + s_methods["example01::cyclePayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", 
argtypes, "payload")); + s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; - payload_methods_offset = methods.size(); - - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class example01 @@ -172,21 +193,24 @@ std::vector methods; - // (23) payload(double d = 0.) + // payload(double d = 0.) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor", kConstructor)); + s_methods["payload::payload_double"] = s_method_id++; - // (24) double getData() + // double getData() argtypes.clear(); methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + s_methods["payload::getData"] = s_method_id++; - // (25) void setData(double d) + // void setData(double d) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + s_methods["payload::setData_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class payload } @@ -252,133 +276,103 @@ int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; - switch ((long)method) { - case 1: // static int example01::staticAddOneToInt(int) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddOneToInt_int"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 2: // static int example01::staticAddOneToInt(int, int) + } else if (idx == s_methods["static_example01::staticAddOneToInt_int_int"]) { assert(!self && nargs == 2); result = dummy::example01::staticAddOneToInt( ((CPPYY_G__value*)args)[0].obj.in, 
((CPPYY_G__value*)args)[1].obj.in); - break; - case 3: // static int example01::staticAtoi(const char* str) + } else if (idx == s_methods["static_example01::staticAtoi_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 8: // static int example01::getCount() + } else if (idx == s_methods["static_example01::getCount"]) { assert(!self && nargs == 0); result = dummy::example01::getCount(); - break; - case 12: // int example01::addDataToInt(int a) + } else if (idx == s_methods["example01::addDataToInt_int"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 18: // int example01::addDataToAtoi(const char* str) + } else if (idx == s_methods["example01::addDataToAtoi_cchar*"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else { assert(!"method unknown in cppyy_call_i"); - break; } return result; } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { long result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = (long)dummy::example01::staticStrcpy( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + } else if (idx == s_methods["static_example01::staticCyclePayload_payload*_double"]) { assert(!self && nargs == 2); result = (long)dummy::example01::staticCyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 19: // char* example01::addToStringValue(const char* str) + } else if (idx == 
s_methods["example01::addToStringValue_cchar*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->addToStringValue( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 21: // payload* example01::cyclePayload(payload* p) + } else if (idx == s_methods["example01::cyclePayload_payload*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else { assert(!"method unknown in cppyy_call_l"); - break; } return result; } double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; - switch ((long)method) { - case 0: // static double example01::staticAddToDouble(double) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddToDouble_double"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 17: // double example01::addDataToDouble(double a) + } else if (idx == s_methods["example01::addDataToDouble_double"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 24: // double payload::getData() + } else if (idx == s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); - break; - default: + } else { assert(!"method unknown in cppyy_call_d"); - break; } return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char* result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else { assert(!"method unknown in 
cppyy_call_s"); - break; } return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { void* result = 0; - if (handle == s_handles["example01"]) { - switch ((long)method) { - case 10: - assert(nargs == 0); - result = new dummy::example01; - break; - case 11: - assert(nargs == 1); - result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); - break; - default: - assert(!"method of example01 unknown in cppyy_constructor"); - break; - } - } else if (handle == s_handles["payload"]) { - switch ((long)method) { - case 23: - if (nargs == 0) result = new dummy::payload; - else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); - break; - default: - assert(!"method payload unknown in cppyy_constructor"); - break; - } + const long idx = (long)method; + if (idx == s_methods["example01::example01"]) { + assert(nargs == 0); + result = new dummy::example01; + } else if (idx == s_methods["example01::example01_int"]) { + assert(nargs == 1); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["payload::payload_double"]) { + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + } else { + assert(!"method unknown in cppyy_constructor"); } return (cppyy_object_t)result; } @@ -486,10 +480,10 @@ } cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return (cppyy_method_t)method_index; - else if (handle == s_handles["payload"]) - return (cppyy_method_t)((long)method_index + payload_methods_offset); + if (s_scopes.find(handle) != s_scopes.end()) { + long id = s_scopes[handle].m_method_offset + (long)method_index; + return (cppyy_method_t)id; + } assert(!"unknown class in cppyy_get_method"); return (cppyy_method_t)0; } @@ -497,20 +491,17 @@ /* method properties 
----------------------------------------------------- */ int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return example01_last_static_method <= method_index - && method_index < example01_last_constructor; - else if (handle == s_handles["payload"]) - return (long)method_index == 0; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kConstructor; + assert(!"unknown class in cppyy_is_constructor"); return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return method_index < example01_last_static_method ? 1 : 0; - if (handle == s_handles["payload"]) - return 0; - return 1; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kStatic; + assert(!"unknown class in cppyy_is_staticmethod"); + return 0; } From noreply at buildbot.pypy.org Thu May 1 09:26:15 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:15 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix warning message Message-ID: <20140501072615.D504B1C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71122:ba929dab8b1f Date: 2014-04-30 17:24 -0700 http://bitbucket.org/pypy/pypy/changeset/ba929dab8b1f/ Log: fix warning message diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ b/pypy/module/cppyy/test/datatypes.h @@ -16,9 +16,9 @@ class four_vector { public: four_vector(double x, double y, double z, double t) : - m_x(x), m_y(y), m_z(z), m_t(t), m_cc_called(false) {} + m_cc_called(false), m_x(x), m_y(y), m_z(z), m_t(t) {} four_vector(const four_vector& s) : - m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t), m_cc_called(true) {} + m_cc_called(true), m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t) {} double 
operator[](int i) { if (i == 0) return m_x; From noreply at buildbot.pypy.org Thu May 1 09:26:17 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:17 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: open up more tests (from test_datatypes.py) for use with dummy backend Message-ID: <20140501072617.078A01C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71123:49432e9dce27 Date: 2014-04-30 23:45 -0700 http://bitbucket.org/pypy/pypy/changeset/49432e9dce27/ Log: open up more tests (from test_datatypes.py) for use with dummy backend diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -8,15 +8,18 @@ #include #include +#include #include #include +#pragma GCC diagnostic ignored "-Winvalid-offsetof" // add example01.cxx code int globalAddOneToInt(int a); namespace dummy { #include "example01.cxx" +#include "datatypes.cxx" } int globalAddOneToInt(int a) { @@ -36,7 +39,7 @@ const std::vector& argtypes, const std::string& returntype, EMethodType mtype = kNormal) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; @@ -44,14 +47,28 @@ EMethodType m_type; }; +struct Cppyy_PseudoDatambrInfo { + Cppyy_PseudoDatambrInfo(const std::string& name, + const std::string& type, + size_t offset, bool isstatic) : + m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} + + std::string m_name; + std::string m_type; + size_t m_offset; + bool m_isstatic; +}; + struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} Cppyy_PseudoClassInfo(const std::vector& methods, - long method_offset) : - m_methods(methods), m_method_offset(method_offset) {} + long method_offset, + const std::vector& data) : + m_methods(methods), 
m_method_offset(method_offset), m_datambrs(data) {} std::vector m_methods; long m_method_offset; + std::vector m_datambrs; }; typedef std::map Scopes_t; @@ -59,10 +76,57 @@ static std::map s_methods; +#define PUBLIC_CPPYY_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ + offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname, argtypes, #dmtype)); \ + s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + argtypes.push_back(#dmtype); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname, argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + argtypes.clear(); \ + argtypes.push_back("const "#dmtype"&"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname"_c", argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ + PUBLIC_CPPYY_DATA(dmname, dmtype); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array2", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ + PUBLIC_CPPYY_DATA2(dmname, dmtype); \ + argtypes.push_back(#dmtype"*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + argtypes.clear(); 
argtypes.push_back("void*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_void_array_"#key, argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + +#define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ + (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static long s_scope_id = 0; + static long s_scope_id = 0; static long s_method_id = 0; { // class example01 -- @@ -184,10 +248,13 @@ methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size()); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class example01 + //==================================================================== + { // class payload -- s_handles["payload"] = (cppyy_scope_t)++s_scope_id; @@ -210,9 +277,62 @@ methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); s_methods["payload::setData_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size()); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class payload + + //==================================================================== + + { // class cppyy_test_data -- + s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // cppyy_test_data() + std::vector argtypes; + methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); + s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + + methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); + 
s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + + std::vector data; + PUBLIC_CPPYY_DATA2(bool, bool); + PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (uchar, unsigned char); + PUBLIC_CPPYY_DATA3(short, short, h); + PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); + PUBLIC_CPPYY_DATA3(int, int, i); + PUBLIC_CPPYY_DATA3(uint, unsigned int, I); + PUBLIC_CPPYY_DATA3(long, long, l); + PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); + PUBLIC_CPPYY_DATA (llong, long long); + PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA3(float, float, f); + PUBLIC_CPPYY_DATA3(double, double, d); + PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (voidp, void*); + + PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); + PUBLIC_CPPYY_STATIC_DATA(short, short); + PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); + PUBLIC_CPPYY_STATIC_DATA(int, int); + PUBLIC_CPPYY_STATIC_DATA(uint, unsigned int); + PUBLIC_CPPYY_STATIC_DATA(long, long); + PUBLIC_CPPYY_STATIC_DATA(ulong, unsigned long); + PUBLIC_CPPYY_STATIC_DATA(llong, long long); + PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); + PUBLIC_CPPYY_STATIC_DATA(float, float); + PUBLIC_CPPYY_STATIC_DATA(double, double); + PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class cppyy_test_data + } } _init; @@ -254,26 +374,136 @@ /* method/function dispatching -------------------------------------------- */ void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - switch ((long)method) { - case 5: // static void example01:;staticSetPayload(payload* p, double d) + long idx = (long)method; + if (idx == s_methods["static_example01::staticSetPayload_payload*_double"]) { assert(!self && nargs == 2); 
dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 9: // static void example01::setCount(int) + } else if (idx == s_methods["static_example01::setCount_int"]) { assert(!self && nargs == 1); dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); - break; - case 20: // void example01::setPayload(payload* p); + } else if (idx == s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + assert(self && nargs == 0); + ((dummy::cppyy_test_data*)self)->destroy_arrays(); + } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_char"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["cppyy_test_data::set_short"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned 
short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_int"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_uint"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_long"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else 
if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_float"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == s_methods["cppyy_test_data::set_float_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_double"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + } else { assert(!"method unknown in cppyy_call_v"); - break; } } +unsigned char cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + unsigned char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_bool"]) { + assert(self && nargs == 0); + result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + } else { + assert(!"method unknown in cppyy_call_b"); + } + return result; +} + +char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_char"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_char(); + } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + assert(self && nargs == 0); + result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + } else { + assert(!"method unknown in cppyy_call_c"); + } + return result; +} + +short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + short result = 0; + const long 
idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_short"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_short(); + } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + assert(self && nargs == 0); + result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + } else { + assert(!"method unknown in cppyy_call_h"); + } + return result; +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; const long idx = (long)method; @@ -297,6 +527,9 @@ assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::get_int"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_int(); } else { assert(!"method unknown in cppyy_call_i"); } @@ -323,12 +556,154 @@ assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::get_uint"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint(); + } else if (idx == s_methods["cppyy_test_data::get_long"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_long(); + } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); + } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + assert(self && nargs == 0); + result = 
(long)((dummy::cppyy_test_data*)self)->get_short_array(); + } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); + } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); + } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); + } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); + } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); + } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + assert(self && nargs 
== 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_h"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + 
(*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(double**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + (*(double**)&((CPPYY_G__value*)args)[0])); } else { assert(!"method unknown in cppyy_call_l"); } return result; } +long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + long long result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_llong"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_llong(); + } else if (idx == s_methods["cppyy_test_data::get_ullong"]) { + assert(self && nargs == 0); + result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + } else { + assert(!"method unknown in cppyy_call_ll"); + } + return result; +} + +float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int 
nargs, void* args) { + float result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_float"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_float(); + } else { + assert(!"method unknown in cppyy_call_f"); + } + return result; +} + double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; const long idx = (long)method; @@ -341,6 +716,9 @@ } else if (idx == s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); + } else if (idx == s_methods["cppyy_test_data::get_double"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_double(); } else { assert(!"method unknown in cppyy_call_d"); } @@ -369,8 +747,12 @@ assert(nargs == 1); result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); } else if (idx == s_methods["payload::payload_double"]) { + assert(nargs == 0 || nargs == 1); if (nargs == 0) result = new dummy::payload; else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::cppyy_test_data"]) { + assert(nargs == 0); + result = new dummy::cppyy_test_data; } else { assert(!"method unknown in cppyy_constructor"); } @@ -506,8 +888,30 @@ /* data member reflection information ------------------------------------- */ -int cppyy_num_datamembers(cppyy_scope_t /* handle */) { - return 0; +int cppyy_num_datamembers(cppyy_scope_t handle) { + return s_scopes[handle].m_datambrs.size(); +} + +char* cppyy_datamember_name(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_name); +} + +char* cppyy_datamember_type(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); +} + +size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { + return 
s_scopes[handle].m_datambrs[idatambr].m_offset; +} + + +/* data member properties ------------------------------------------------ */ +int cppyy_is_publicdata(cppyy_scope_t handle, int idatambr) { + return 1; +} + +int cppyy_is_staticdata(cppyy_scope_t handle, int idatambr) { + return s_scopes[handle].m_datambrs[idatambr].m_isstatic; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,14 +7,18 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0] or \ - 'test_pythonify.py' in item.location[0]): + import os + tst = os.path.basename(item.location[0]) + if not tst in ('test_helper.py', 'test_cppyy.py', 'test_pythonify.py', + 'test_datatypes.py'): py.test.skip("genreflex is not installed") import re - if 'test_pythonify.py' in item.location[0] and \ + if tst == 'test_pythonify.py' and \ not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") + elif tst == 'test_datatypes.py' and \ + not re.search("AppTestDATATYPES.test0[1-8]", item.location[2]): + py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: From noreply at buildbot.pypy.org Thu May 1 09:26:19 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:19 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140501072619.C4BB91C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71124:d37dab820a6a Date: 2014-04-30 23:46 -0700 http://bitbucket.org/pypy/pypy/changeset/d37dab820a6a/ Log: merge default into branch diff too long, truncating to 2000 out of 7401 lines diff 
--git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = types.ModuleType('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -58,7 +58,6 @@ math mmap operator - oracle parser posix pyexpat diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -92,7 +92,7 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have sepearated rpython from the PyPy python + large parts of the codebase. We have separated rpython from the PyPy python interpreter, and rpython is seeing use in other dynamic language projects. 
* Support for precompiled headers in the build process for MSVC diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -152,3 +152,12 @@ .. branch: small-unroll-improvements Improve optimization of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 + +.. branch: lexer_token_position_class + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,12 +3,6 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ba569fe1efdb +.. startrev: 0524dae88c75 -.. branch: small-unroll-improvements -Improve optimiziation of small allocation-heavy loops in the JIT -.. branch: reflex-support - -.. 
branch: refactor-buffer-api -Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -321,10 +321,11 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i]) + key = self.keywords[i] + space.setitem_str(w_kwds, key, self.keywords_w[i]) else: w_key = self.keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, self.keywords_w[i]) + space.setitem(w_kwds, w_key, self.keywords_w[i]) return w_args, w_kwds # JIT helper functions @@ -416,10 +417,10 @@ break else: if i < limit: - w_key = space.wrap(keywords[i]) + space.setitem_str(w_kwds, keywords[i], keywords_w[i]) else: w_key = keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, keywords_w[i]) + space.setitem(w_kwds, w_key, keywords_w[i]) # # ArgErr family of exceptions raised in case of argument mismatch. 
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -93,6 +93,7 @@ def setitem(self, obj, key, value): obj[key] = value + setitem_str = setitem def getitem(self, obj, key): return obj[key] diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -387,3 +387,9 @@ # because it's a regular method, and .__objclass__ # differs from .im_class in case the method is # defined in some parent class of l's actual class + + def test_func_closure(self): + x = 2 + def f(): + return x + assert f.__closure__[0].cell_contents is x diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -809,6 +809,7 @@ __dict__ = getset_func_dict, __defaults__ = getset_func_defaults, __globals__ = interp_attrproperty_w('w_func_globals', cls=Function), + __closure__ = GetSetProperty(Function.fget_func_closure), __module__ = getset___module__, __weakref__ = make_weakref_descr(Function), ) diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -27,6 +27,7 @@ consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) + if not dont_inherit: caller = ec.gettopframe_nohidden() if caller: @@ -37,8 +38,7 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - w_ast_type = space.gettypeobject(ast.AST.typedef) - if space.isinstance_w(w_source, w_ast_type): + if space.isinstance_w(w_source, space.gettypeobject(ast.AST.typedef)): ast_node = space.interp_w(ast.mod, w_source) ast_node.sync_app_attrs(space) code = ec.compiler.compile_ast(ast_node, 
filename, mode, flags) @@ -47,20 +47,20 @@ if space.isinstance_w(w_source, space.w_unicode): w_utf_8_source = space.call_method(w_source, "encode", space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) + source = space.str_w(w_utf_8_source) # This flag tells the parser to reject any coding cookies it sees. flags |= consts.PyCF_SOURCE_IS_UTF8 else: - str_ = space.readbuf_w(w_source).as_str() + source = space.readbuf_w(w_source).as_str() - if '\x00' in str_: + if '\x00' in source: raise OperationError(space.w_TypeError, space.wrap( "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: - code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + code = ec.compiler.compile_to_ast(source, filename, mode, flags) else: - code = ec.compiler.compile(str_, filename, mode, flags) + code = ec.compiler.compile(source, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -189,38 +189,32 @@ subentry._stop(tt, it) - at jit.elidable_promote() def create_spec_for_method(space, w_function, w_type): - w_function = w_function + class_name = None if isinstance(w_function, Function): name = w_function.name + # try to get the real class that defines the method, + # which is a superclass of the class of the instance + from pypy.objspace.std.typeobject import W_TypeObject # xxx + if isinstance(w_type, W_TypeObject): + w_realclass, _ = space.lookup_in_type_where(w_type, name) + if isinstance(w_realclass, W_TypeObject): + class_name = w_realclass.get_module_type_name() else: name = '?' 
- # try to get the real class that defines the method, - # which is a superclass of the class of the instance - from pypy.objspace.std.typeobject import W_TypeObject # xxx - class_name = w_type.getname(space) # if the rest doesn't work - if isinstance(w_type, W_TypeObject) and name != '?': - w_realclass, _ = space.lookup_in_type_where(w_type, name) - if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + if class_name is None: + class_name = w_type.getname(space) # if the rest doesn't work return "{method '%s' of '%s' objects}" % (name, class_name) - at jit.elidable_promote() def create_spec_for_function(space, w_func): - if w_func.w_module is None: - module = '' - else: + if w_func.w_module is not None: module = space.str_w(w_func.w_module) - if module == '__builtin__': - module = '' - else: - module += '.' - return '{%s%s}' % (module, w_func.name) + if module != '__builtin__': + return '{%s.%s}' % (module, w_func.name) + return '{%s}' % w_func.name - at jit.elidable_promote() def create_spec_for_object(space, w_obj): class_name = space.type(w_obj).getname(space) return "{'%s' object}" % (class_name,) @@ -345,6 +339,7 @@ def _enter_builtin_call(self, key): self = jit.promote(self) + key = jit.promote_string(key) entry = self._get_or_make_builtin_entry(key) self.current_context = ProfilerContext(self, entry) @@ -353,6 +348,7 @@ if context is None: return self = jit.promote(self) + key = jit.promote_string(key) try: entry = self._get_or_make_builtin_entry(key, False) except KeyError: diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,7 +600,8 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_RSocket.typedef = TypeDef("socket", + __module__ = "_socket", __doc__ = """\ socket([family[, type[, proto]]]) -> 
socket object diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -313,6 +313,11 @@ cls.space = space cls.w_udir = space.wrap(str(udir)) + def test_module(self): + import _socket + assert _socket.socket.__name__ == 'socket' + assert _socket.socket.__module__ == '_socket' + def test_ntoa_exception(self): import _socket raises(_socket.error, _socket.inet_ntoa, "ab") diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -448,6 +448,9 @@ self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None)) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_add(self, space, w_other): raise NotImplementedError @@ -503,6 +506,7 @@ __setslice__ = interp2app(W_ArrayBase.descr_setslice), __delitem__ = interp2app(W_ArrayBase.descr_delitem), __delslice__ = interp2app(W_ArrayBase.descr_delslice), + __iter__ = interp2app(W_ArrayBase.descr_iter), __add__ = interpindirect2app(W_ArrayBase.descr_add), __iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -697,6 +697,8 @@ for i in a: b.append(i) assert repr(b) == "array('i', [1, 2, 3])" + assert hasattr(b, '__iter__') + assert next(b.__iter__()) == 1 def test_lying_iterable(self): class lier(object): diff --git a/pypy/module/cpyext/patches/cx_Oracle.patch b/pypy/module/cpyext/patches/cx_Oracle.patch deleted file mode 100644 --- a/pypy/module/cpyext/patches/cx_Oracle.patch +++ /dev/null @@ -1,60 +0,0 @@ -Index: cx_Oracle.c -=================================================================== ---- cx_Oracle.c (r�vision 333) -+++ cx_Oracle.c (copie de travail) -@@ -65,6 
+65,13 @@ - #define CXORA_BASE_EXCEPTION PyExc_StandardError - #endif - -+// define missing PyDateTime_DELTA macros -+#ifndef PYPY_VERSION -+PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)o)->days) -+PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)o)->seconds) -+PyDateTime_DELTA_GET_MICROSECONDS(o) (((PyDateTime_Delta*)o)->microseconds) -+#endif -+ - // define simple construct for determining endianness of the platform - // Oracle uses native encoding with OCI_UTF16 but bails when a BOM is written - #define IS_LITTLE_ENDIAN (int)*(unsigned char*) &one -@@ -138,6 +145,7 @@ - *exception = PyErr_NewException(buffer, baseException, NULL); - if (!*exception) - return -1; -+ Py_INCREF(*exception); - return PyModule_AddObject(module, name, *exception); - } - -Index: IntervalVar.c -=================================================================== ---- IntervalVar.c (r�vision 333) -+++ IntervalVar.c (copie de travail) -@@ -121,7 +121,7 @@ - unsigned pos, // array position to set - PyObject *value) // value to set - { -- sb4 hours, minutes, seconds; -+ sb4 days, hours, minutes, seconds, microseconds; - PyDateTime_Delta *delta; - sword status; - -@@ -131,13 +131,16 @@ - } - - delta = (PyDateTime_Delta*) value; -- hours = (sb4) delta->seconds / 3600; -- seconds = delta->seconds - hours * 3600; -+ days = PyDateTime_DELTA_GET_DAYS(delta); -+ seconds = PyDateTime_DELTA_GET_SECONDS(delta); -+ hours = (sb4) seconds / 3600; -+ seconds -= hours * 3600; - minutes = (sb4) seconds / 60; - seconds -= minutes * 60; -+ microseconds = PyDateTime_DELTA_GET_MICROSECONDS(delta); - status = OCIIntervalSetDaySecond(var->environment->handle, -- var->environment->errorHandle, delta->days, hours, minutes, -- seconds, delta->microseconds, var->data[pos]); -+ var->environment->errorHandle, days, hours, minutes, -+ seconds, microseconds, var->data[pos]); - if (Environment_CheckForError(var->environment, status, - "IntervalVar_SetValue()") < 0) - return -1; diff --git 
a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -1,3 +1,4 @@ +import string from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -28,9 +29,11 @@ if not space.is_none(out): return out - dtype = w_arr_list[0].get_dtype() - for w_arr in w_arr_list[1:]: - dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) + dtype = None + for w_arr in w_arr_list: + if not space.is_none(w_arr): + dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) + assert dtype is not None out = base.W_NDimArray.from_shape(space, shape, dtype) return out @@ -468,6 +471,23 @@ return dtype_from_list(space, w_lst, True) +def _check_for_commastring(s): + if s[0] in string.digits or s[0] in '<>=|' and s[1] in string.digits: + return True + if s[0] == '(' and s[1] == ')' or s[0] in '<>=|' and s[1] == '(' and s[2] == ')': + return True + sqbracket = 0 + for c in s: + if c == ',': + if sqbracket == 0: + return True + elif c == '[': + sqbracket += 1 + elif c == ']': + sqbracket -= 1 + return False + + def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): # w_align and w_copy are necessary for pickling cache = get_dtype_cache(space) @@ -497,7 +517,7 @@ return w_dtype elif space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) - if ',' in name: + if _check_for_commastring(name): return dtype_from_spec(space, w_dtype) cname = name[1:] if name[0] == NPY.OPPBYTE else name try: @@ -508,7 +528,7 @@ if name[0] == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) return dtype - if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': + if name[0] in 'VSUca' or name[0] in '<>=|' and name[1] in 'VSUca': return variable_dtype(space, name) raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) elif 
space.isinstance_w(w_dtype, space.w_list): @@ -589,7 +609,7 @@ raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: return new_string_dtype(space, 1, NPY.CHARLTR) - elif char == NPY.STRINGLTR: + elif char == NPY.STRINGLTR or char == NPY.STRINGLTR2: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -606,25 +606,34 @@ reds = 'auto') def clip(space, arr, shape, min, max, out): + assert min or max arr_iter, arr_state = arr.create_iter(shape) + if min is not None: + min_iter, min_state = min.create_iter(shape) + else: + min_iter, min_state = None, None + if max is not None: + max_iter, max_state = max.create_iter(shape) + else: + max_iter, max_state = None, None + out_iter, out_state = out.create_iter(shape) + shapelen = len(shape) dtype = out.get_dtype() - shapelen = len(shape) - min_iter, min_state = min.create_iter(shape) - max_iter, max_state = max.create_iter(shape) - out_iter, out_state = out.create_iter(shape) while not arr_iter.done(arr_state): clip_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_v = arr_iter.getitem(arr_state).convert_to(space, dtype) - w_min = min_iter.getitem(min_state).convert_to(space, dtype) - w_max = max_iter.getitem(max_state).convert_to(space, dtype) - if dtype.itemtype.lt(w_v, w_min): - w_v = w_min - elif dtype.itemtype.gt(w_v, w_max): - w_v = w_max + arr_state = arr_iter.next(arr_state) + if min_iter is not None: + w_min = min_iter.getitem(min_state).convert_to(space, dtype) + if dtype.itemtype.lt(w_v, w_min): + w_v = w_min + min_state = min_iter.next(min_state) + if max_iter is not None: + w_max = max_iter.getitem(max_state).convert_to(space, dtype) + if dtype.itemtype.gt(w_v, w_max): + w_v = w_max + max_state = max_iter.next(max_state) out_iter.setitem(out_state, w_v) - arr_state = 
arr_iter.next(arr_state) - min_state = min_iter.next(min_state) - max_state = max_iter.next(max_state) out_state = out_iter.next(out_state) round_driver = jit.JitDriver(name='numpy_round_driver', diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -593,17 +593,25 @@ def descr_choose(self, space, w_choices, w_out=None, w_mode=None): return choose(space, self, w_choices, w_out, w_mode) - def descr_clip(self, space, w_min, w_max, w_out=None): + def descr_clip(self, space, w_min=None, w_max=None, w_out=None): + if space.is_none(w_min): + w_min = None + else: + w_min = convert_to_array(space, w_min) + if space.is_none(w_max): + w_max = None + else: + w_max = convert_to_array(space, w_max) if space.is_none(w_out): w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - min = convert_to_array(space, w_min) - max = convert_to_array(space, w_max) - shape = shape_agreement_multiple(space, [self, min, max, w_out]) - out = descriptor.dtype_agreement(space, [self, min, max], shape, w_out) - loop.clip(space, self, shape, min, max, out) + if not w_min and not w_max: + raise oefmt(space.w_ValueError, "One of max or min must be given.") + shape = shape_agreement_multiple(space, [self, w_min, w_max, w_out]) + out = descriptor.dtype_agreement(space, [self, w_min, w_max], shape, w_out) + loop.clip(space, self, shape, w_min, w_max, out) return out def descr_get_ctypes(self, space): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -191,6 +191,9 @@ d = dtype('S5') assert repr(d) == "dtype('S5')" assert str(d) == "|S5" + d = dtype('a5') + assert repr(d) == "dtype('S5')" + assert str(d) == "|S5" d = dtype('U5') assert repr(d) == 
"dtype('%sU5')" % b assert str(d) == "%sU5" % b diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2229,7 +2229,13 @@ def test_clip(self): from numpypy import array a = array([1, 2, 17, -3, 12]) + exc = raises(ValueError, a.clip) + assert str(exc.value) == "One of max or min must be given." assert (a.clip(-2, 13) == [1, 2, 13, -2, 12]).all() + assert (a.clip(min=-2) == [1, 2, 17, -2, 12]).all() + assert (a.clip(min=-2, max=None) == [1, 2, 17, -2, 12]).all() + assert (a.clip(max=13) == [1, 2, 13, -3, 12]).all() + assert (a.clip(min=None, max=13) == [1, 2, 13, -3, 12]).all() assert (a.clip(-1, 1, out=None) == [1, 1, 1, -1, 1]).all() assert (a == [1, 2, 17, -3, 12]).all() assert (a.clip(-1, [1, 2, 3, 4, 5]) == [1, 2, 3, -1, 5]).all() diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -310,7 +310,7 @@ assert math.isnan(fmod(v, 2)) def test_minimum(self): - from numpypy import array, minimum + from numpypy import array, minimum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -318,8 +318,12 @@ for i in range(3): assert c[i] == min(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(minimum(arg1, arg2)).all() + def test_maximum(self): - from numpypy import array, maximum + from numpypy import array, maximum, nan, isnan a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -327,6 +331,10 @@ for i in range(3): assert c[i] == max(a[i], b[i]) + arg1 = array([0, nan, nan]) + arg2 = array([nan, 0, nan]) + assert isnan(maximum(arg1, arg2)).all() + x = maximum(2, 3) assert x == 3 assert isinstance(x, (int, long)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- 
a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -705,20 +705,20 @@ return math.fabs(v) @simple_binary_op + def max(self, v1, v2): + return v1 if v1 >= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op + def min(self, v1, v2): + return v1 if v1 <= v2 or rfloat.isnan(v1) else v2 + + @simple_binary_op def fmax(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return max(v1, v2) + return v1 if v1 >= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmin(self, v1, v2): - if rfloat.isnan(v2): - return v1 - elif rfloat.isnan(v1): - return v2 - return min(v1, v2) + return v1 if v1 <= v2 or rfloat.isnan(v2) else v2 @simple_binary_op def fmod(self, v1, v2): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -477,6 +477,8 @@ promote_bools=False): if dt2 is None: return dt1 + if dt1 is None: + return dt2 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 diff --git a/pypy/module/oracle/__init__.py b/pypy/module/oracle/__init__.py deleted file mode 100644 --- a/pypy/module/oracle/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule - -class Module(MixedModule): - applevel_name = 'cx_Oracle' - - interpleveldefs = { - 'connect': 'interp_connect.W_Connection', - 'Connection': 'interp_connect.W_Connection', - 'NUMBER': 'interp_variable.VT_Float', - 'STRING': 'interp_variable.VT_String', - 'UNICODE': 'interp_variable.VT_NationalCharString', - 'DATETIME': 'interp_variable.VT_DateTime', - 'DATE': 'interp_variable.VT_Date', - 'TIMESTAMP': 'interp_variable.VT_Timestamp', - 'INTERVAL': 'interp_variable.VT_Interval', - 'BINARY': 'interp_variable.VT_Binary', - 'LONG_STRING': 'interp_variable.VT_LongString', - 'LONG_BINARY': 'interp_variable.VT_LongBinary', - 'FIXED_CHAR': 'interp_variable.VT_FixedChar', - 'FIXED_UNICODE': 
'interp_variable.VT_FixedNationalChar', - 'CURSOR': 'interp_variable.VT_Cursor', - 'BLOB': 'interp_variable.VT_BLOB', - 'CLOB': 'interp_variable.VT_CLOB', - 'OBJECT': 'interp_variable.VT_Object', - 'Variable': 'interp_variable.W_Variable', - 'SessionPool': 'interp_pool.W_SessionPool', - } - - appleveldefs = { - 'version': 'app_oracle.version', - 'paramstyle': 'app_oracle.paramstyle', - 'makedsn': 'app_oracle.makedsn', - 'TimestampFromTicks': 'app_oracle.TimestampFromTicks', - } - for name in """DataError DatabaseError Error IntegrityError InterfaceError - InternalError NotSupportedError OperationalError - ProgrammingError Warning""".split(): - appleveldefs[name] = "app_oracle.%s" % (name,) - - def startup(self, space): - from pypy.module.oracle.interp_error import get - state = get(space) - state.startup(space) - (state.w_DecimalType, - state.w_DateTimeType, state.w_DateType, state.w_TimedeltaType, - ) = space.fixedview(space.appexec([], """(): - import decimal, datetime - return (decimal.Decimal, - datetime.datetime, datetime.date, datetime.timedelta) - """)) - space.setattr(space.wrap(self), - space.wrap("Timestamp"), state.w_DateTimeType) - space.setattr(space.wrap(self), - space.wrap("Date"), state.w_DateType) diff --git a/pypy/module/oracle/app_oracle.py b/pypy/module/oracle/app_oracle.py deleted file mode 100644 --- a/pypy/module/oracle/app_oracle.py +++ /dev/null @@ -1,42 +0,0 @@ -version = '5.0.0' -paramstyle = 'named' - -class Warning(StandardError): - pass - -class Error(StandardError): - pass - -class InterfaceError(Error): - pass - -class DatabaseError(Error): - pass - -class DataError(DatabaseError): - pass - -class OperationalError(DatabaseError): - pass - -class IntegrityError(DatabaseError): - pass - -class InternalError(DatabaseError): - pass - -class ProgrammingError(DatabaseError): - pass - -class NotSupportedError(DatabaseError): - pass - - -def makedsn(host, port, sid): - return ("(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=" - 
"(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))" - "(CONNECT_DATA=(SID=%s)))" % (host, port, sid)) - -def TimestampFromTicks(*args): - import datetime - return datetime.datetime.fromtimestamp(*args) diff --git a/pypy/module/oracle/config.py b/pypy/module/oracle/config.py deleted file mode 100644 --- a/pypy/module/oracle/config.py +++ /dev/null @@ -1,54 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.oracle import roci - -WITH_UNICODE = False - -MAX_STRING_CHARS = 4000 -MAX_BINARY_BYTES = 4000 - -if WITH_UNICODE: - CHARSETID = roci.OCI_UTF16ID - BYTES_PER_CHAR = 2 - def string_w(space, w_obj): - return space.unicode_w(w_obj) -else: - def string_w(space, w_obj): - return space.str_w(w_obj) - - def w_string(space, buf, len=-1): - #assert type(len) is int - if len < 0: - return space.wrap(rffi.charp2str(buf)) - else: - return space.wrap(rffi.charpsize2str(buf, len)) - CHARSETID = 0 - BYTES_PER_CHAR = 1 - - class StringBuffer: - "Fill a char* buffer with data, suitable to pass to Oracle functions" - def __init__(self): - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 - - def fill(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - strvalue = space.str_w(w_value) - self.ptr = rffi.str2charp(strvalue) - self.size = len(strvalue) - - def fill_with_unicode(self, space, w_value): - if w_value is None or space.is_w(w_value, space.w_None): - self.clear() - else: - # XXX ucs2 only probably - univalue = space.unicode_w(w_value) - self.ptr = rffi.cast(roci.oratext, rffi.unicode2wcharp(univalue)) - self.size = len(univalue) * 2 - - def clear(self): - if self.ptr: - rffi.free_charp(self.ptr) - self.ptr = lltype.nullptr(roci.oratext.TO) - self.size = 0 diff --git a/pypy/module/oracle/conftest.py b/pypy/module/oracle/conftest.py deleted file mode 100644 --- a/pypy/module/oracle/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ -import os - -def pytest_addoption(parser): - group = parser.getgroup("Oracle 
module options") - group.addoption('--oracle-home', dest="oracle_home", - help="Home directory of Oracle client installation", - default=os.environ.get("ORACLE_HOME")) - group.addoption('--oracle-connect', dest="oracle_connect", - help="connect string (user/pwd at db) used for tests") diff --git a/pypy/module/oracle/interp_connect.py b/pypy/module/oracle/interp_connect.py deleted file mode 100644 --- a/pypy/module/oracle/interp_connect.py +++ /dev/null @@ -1,551 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w, - GetSetProperty) -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import string_w, StringBuffer, MAX_STRING_CHARS -from pypy.module.oracle.interp_environ import Environment -from pypy.module.oracle.interp_cursor import W_Cursor -from pypy.module.oracle.interp_pool import W_SessionPool -from pypy.module.oracle.interp_variable import VT_String - - -class W_Connection(W_Root): - def __init__(self): - self.commitMode = roci.OCI_DEFAULT - self.environment = None - self.autocommit = False - - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - self.serverHandle = lltype.nullptr(roci.OCIServer.TO) - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - - self.w_version = None - self.release = False - - - @unwrap_spec(mode=int, handle=int, - threaded=bool, twophase=bool, events=bool, - purity=bool) - def descr_new(space, w_subtype, - w_user=None, - w_password=None, - w_dsn=None, - mode=roci.OCI_DEFAULT, - handle=0, # XXX should be a ptr type - w_pool=None, - threaded=False, - twophase=False, - events=False, - w_cclass=None, - purity=0, - w_newpassword=None): - self = space.allocate_instance(W_Connection, w_subtype) - 
W_Connection.__init__(self) - - # set up the environment - if w_pool: - pool = space.interp_w(W_SessionPool, w_pool) - self.environment = pool.environment.clone() - else: - pool = None - self.environment = Environment.create(space, threaded, events) - - self.w_username = w_user - self.w_password = w_password - self.w_tnsentry = w_dsn - - # perform some parsing, if necessary - if (self.w_username and not self.w_password and - space.is_true(space.contains(self.w_username, space.wrap('/')))): - (self.w_username, self.w_password) = space.listview( - space.call_method(self.w_username, 'split', - space.wrap('/'), space.wrap(1))) - - if (self.w_password and not self.w_tnsentry and - space.is_true(space.contains(self.w_password, space.wrap('@')))): - (self.w_password, self.w_tnsentry) = space.listview( - space.call_method(self.w_password, 'split', - space.wrap('@'), space.wrap(1))) - - if pool or w_cclass is not None: - self.getConnection(space, pool, w_cclass, purity) - else: - self.connect(space, mode, twophase) - return space.wrap(self) - - def __del__(self): - self.enqueue_for_destruction(self.environment.space, - W_Connection.destructor, - '__del__ method of ') - - def destructor(self): - assert isinstance(self, W_Connection) - if self.release: - roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionRelease( - self.handle, self.environment.errorHandle, - None, 0, roci.OCI_DEFAULT) - else: - if self.sessionHandle: - roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - if self.serverHandle: - roci.OCIServerDetach( - self.serverHandle, self.environment.errorHandle, - roci.OCI_DEFAULT) - - def connect(self, space, mode, twophase): - stringBuffer = StringBuffer() - - # allocate the server handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIServer).TO, - 1, flavor='raw') - 
try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SERVER, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate server handle") - self.serverHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # attach to the server - stringBuffer.fill(space, self.w_tnsentry) - try: - status = roci.OCIServerAttach( - self.serverHandle, - self.environment.errorHandle, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Connect(): server attach") - finally: - stringBuffer.clear() - - # allocate the service context handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SVCCTX, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate service context handle") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - # set attribute for server handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.serverHandle, 0, - roci.OCI_ATTR_SERVER, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set server handle") - - # set the internal and external names; these are needed for global - # transactions but are limited in terms of the lengths of the strings - if twophase: - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_INTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set internal name") - status = roci.OCIAttrSet( - self.serverHandle, roci.OCI_HTYPE_SERVER, - "cx_Oracle", 0, - roci.OCI_ATTR_EXTERNAL_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, 
"Connection_Connect(): set external name") - - # allocate the session handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISession).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_SESSION, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_Connect(): allocate session handle") - self.sessionHandle = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - credentialType = roci.OCI_CRED_EXT - - # set user name in session handle - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, - roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set user name") - finally: - stringBuffer.clear() - - # set password in session handle - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - credentialType = roci.OCI_CRED_RDBMS - status = roci.OCIAttrSet( - self.sessionHandle, - roci.OCI_HTYPE_SESSION, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set password") - finally: - stringBuffer.clear() - - # set the session handle on the service context handle - status = roci.OCIAttrSet( - self.handle, roci.OCI_HTYPE_SVCCTX, - self.sessionHandle, 0, - roci.OCI_ATTR_SESSION, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_Connect(): set session handle") - - # if a new password has been specified, change it which will also - # establish the session - - # begin the session - status = roci.OCISessionBegin( - self.handle, self.environment.errorHandle, - self.sessionHandle, credentialType, mode) - try: - self.environment.checkForError( - 
status, "Connection_Connect(): begin session") - except: - self.sessionHandle = lltype.nullptr(roci.OCISession.TO) - raise - - def getConnection(self, space, pool, w_cclass, purity): - """Get a connection using the OCISessionGet() interface - rather than using the low level interface for connecting.""" - - proxyCredentials = False - authInfo = lltype.nullptr(roci.OCIAuthInfo.TO) - - if pool: - w_dbname = pool.w_name - mode = roci.OCI_SESSGET_SPOOL - if not pool.homogeneous and pool.w_username and self.w_username: - proxyCredentials = space.is_true(space.ne(pool.w_username, self.w_username)) - mode |= roci.OCI_SESSGET_CREDPROXY - else: - w_dbname = self.w_tnsentry - mode = roci.OCI_SESSGET_STMTCACHE - - stringBuffer = StringBuffer() - - # set up authorization handle, if needed - if not pool or w_cclass or proxyCredentials: - # create authorization handle - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIAuthInfo).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, - roci.OCI_HTYPE_AUTHINFO, - 0, lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Connection_GetConnection(): allocate handle") - - authInfo = handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - - externalCredentials = True - - # set the user name, if applicable - stringBuffer.fill(space, self.w_username) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_USERNAME, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set user name") - finally: - stringBuffer.clear() - - # set the password, if applicable - stringBuffer.fill(space, self.w_password) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - 
roci.OCI_ATTR_PASSWORD, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set password") - finally: - stringBuffer.clear() - - # if no user name or password are set, using external credentials - if not pool and externalCredentials: - mode |= roci.OCI_SESSGET_CREDEXT - - # set the connection class, if applicable - if roci.OCI_ATTR_CONNECTION_CLASS is not None: - stringBuffer.fill(space, w_cclass) - try: - if stringBuffer.size > 0: - externalCredentials = False - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - stringBuffer.ptr, stringBuffer.size, - roci.OCI_ATTR_CONNECTION_CLASS, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Connection_GetConnection(): set connection class") - finally: - stringBuffer.clear() - - # set the purity, if applicable - if (roci.OCI_ATTR_PURITY is not None - and purity != roci.OCI_ATTR_PURITY_DEFAULT): - purityptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, - 1, flavor='raw') - purityptr[0] = rffi.cast(roci.ub4, purity) - try: - status = roci.OCIAttrSet( - authInfo, - roci.OCI_HTYPE_AUTHINFO, - rffi.cast(roci.dvoidp, purityptr), - rffi.sizeof(roci.ub4), - roci.OCI_ATTR_PURITY, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetConnection(): set purity") - finally: - lltype.free(purityptr, flavor='raw') - - # acquire the new session - stringBuffer.fill(space, w_dbname) - foundptr = lltype.malloc(rffi.CArrayPtr(roci.boolean).TO, - 1, flavor='raw') - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCISvcCtx).TO, - 1, flavor='raw') - try: - status = roci.OCISessionGet( - self.environment.handle, - self.environment.errorHandle, - handleptr, - authInfo, - stringBuffer.ptr, stringBuffer.size, - None, 0, - lltype.nullptr(roci.Ptr(roci.oratext).TO), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - foundptr, - mode) - self.environment.checkForError( - status, "Connection_GetConnection(): get connection") - - 
self.handle = handleptr[0] - finally: - stringBuffer.clear() - lltype.free(foundptr, flavor='raw') - lltype.free(handleptr, flavor='raw') - - # eliminate the authorization handle immediately, if applicable - if authInfo: - roci.OCIHandleFree(authInfo, roci.OCI_HTYPE_AUTHINFO) - - # copy members in the case where a pool is being used - if pool: - if not proxyCredentials: - self.w_username = pool.w_username - self.w_password = pool.w_password - self.w_tnsentry = pool.w_tnsentry - self.sessionPool = pool - - self.release = True - - def _checkConnected(self, space): - if not self.handle: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not connected")) - - def close(self, space): - # make sure we are actually connnected - self._checkConnected(space) - - # perform a rollback - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): rollback") - - # logoff of the server - if self.sessionHandle: - status = roci.OCISessionEnd( - self.handle, self.environment.errorHandle, - self.sessionHandle, roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Close(): end session") - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_SVCCTX) - - self.handle = lltype.nullptr(roci.OCISvcCtx.TO) - - def commit(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransCommit( - self.handle, self.environment.errorHandle, - self.commitMode) - self.environment.checkForError( - status, "Connection_Commit()") - - self.commitMode = roci.OCI_DEFAULT - - def rollback(self, space): - # make sure we are actually connected - self._checkConnected(space) - - status = roci.OCITransRollback( - self.handle, self.environment.errorHandle, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Connection_Rollback()") - - def newCursor(self, space): - return space.wrap(W_Cursor(space, self)) - - 
def _getCharacterSetName(self, space, attribute): - # get character set id - charsetIdPtr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, - 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.environment.handle, roci.OCI_HTYPE_ENV, - rffi.cast(roci.dvoidp, charsetIdPtr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - attribute, - self.environment.errorHandle) - self.environment.checkForError( - status, "Connection_GetCharacterSetName(): get charset id") - charsetId = charsetIdPtr[0] - finally: - lltype.free(charsetIdPtr, flavor='raw') - - # get character set name - charsetname_buf, charsetname = rffi.alloc_buffer(roci.OCI_NLS_MAXBUFSZ) - try: - status = roci.OCINlsCharSetIdToName( - self.environment.handle, - charsetname_buf, roci.OCI_NLS_MAXBUFSZ, - charsetId) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): get Oracle charset name") - - ianacharset_buf, ianacharset = rffi.alloc_buffer( - roci.OCI_NLS_MAXBUFSZ) - - try: - # get IANA character set name - status = roci.OCINlsNameMap( - self.environment.handle, - ianacharset_buf, roci.OCI_NLS_MAXBUFSZ, - charsetname_buf, roci.OCI_NLS_CS_ORA_TO_IANA) - self.environment.checkForError( - status, - "Connection_GetCharacterSetName(): translate NLS charset") - charset = rffi.charp2str(ianacharset_buf) - finally: - rffi.keep_buffer_alive_until_here(ianacharset_buf, ianacharset) - finally: - rffi.keep_buffer_alive_until_here(charsetname_buf, charsetname) - return space.wrap(charset) - - def get_encoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_nationalencoding(self, space): - return self._getCharacterSetName(space, roci.OCI_ATTR_ENV_CHARSET_ID) - def get_maxbytespercharacter(self, space): - return space.wrap(self.environment.maxBytesPerCharacter) - - def get_version(self, space): - # if version has already been determined, no need to determine again - if self.w_version: - return self.w_version - - # allocate a cursor to retrieve the 
version - cursor = W_Cursor(space, self) - - # allocate version and compatibility variables - versionVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - compatVar = VT_String(cursor, cursor.arraySize, MAX_STRING_CHARS) - - # call stored procedure - cursor._call(space, "dbms_utility.db_version", - None, space.newlist([space.wrap(versionVar), - space.wrap(compatVar)])) - - # retrieve value - self.w_version = versionVar.getValue(space, 0) - return self.w_version - -W_Connection.typedef = TypeDef( - "Connection", - __new__ = interp2app(W_Connection.descr_new.im_func), - username = interp_attrproperty_w('w_username', W_Connection), - password = interp_attrproperty_w('w_password', W_Connection), - tnsentry = interp_attrproperty_w('w_tnsentry', W_Connection), - - close = interp2app(W_Connection.close), - commit = interp2app(W_Connection.commit), - rollback = interp2app(W_Connection.rollback), - - cursor = interp2app(W_Connection.newCursor), - - encoding = GetSetProperty(W_Connection.get_encoding), - nationalencoding = GetSetProperty(W_Connection.get_nationalencoding), - maxBytesPerCharacter = GetSetProperty(W_Connection.get_maxbytespercharacter), - version = GetSetProperty(W_Connection.get_version), - ) diff --git a/pypy/module/oracle/interp_cursor.py b/pypy/module/oracle/interp_cursor.py deleted file mode 100644 --- a/pypy/module/oracle/interp_cursor.py +++ /dev/null @@ -1,1094 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.typedef import interp_attrproperty, interp_attrproperty_w -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype - -from pypy.module.oracle import roci, interp_error -from pypy.module.oracle.config import w_string, string_w, StringBuffer -from pypy.module.oracle import interp_variable -from pypy.module.oracle.interp_error import get - -# 
XXX are those "assert isinstance(xxx, interp_variable.W_Variable)" necessary? -# the bindList should annotate to SomeList(SomeInstance(W_Variable)) - -class W_Cursor(W_Root): - def __init__(self, space, connection): - self.connection = connection - self.environment = connection.environment - - self.w_statement = None - self.statementType = -1 - self.handle = lltype.nullptr(roci.OCIStmt.TO) - self.isOpen = True - self.isOwned = False - - self.setInputSizes = False - self.arraySize = 50 - self.fetchArraySize = 50 - self.bindArraySize = 1 - self.bindList = None - self.bindDict = None - self.numbersAsStrings = False - self.outputSize = -1 - self.outputSizeColumn = -1 - - self.w_inputTypeHandler = None - self.w_outputTypeHandler = None - self.w_rowFactory = None - - def execute(self, space, w_stmt, __args__): - args_w, kw_w = __args__.unpack() - - if space.is_w(w_stmt, space.w_None): - w_stmt = None - - if len(args_w) > 1: - raise OperationError( - space.w_TypeError, - space.wrap("Too many arguments")) - elif len(args_w) == 1: - if len(kw_w) > 0: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap( - "expecting argument or keyword arguments, not both")) - w_vars = args_w[0] - elif len(kw_w) > 0: - w_vars = space.newdict() - for key, w_value in kw_w.iteritems(): - space.setitem(w_vars, space.wrap(key), w_value) - else: - w_vars = None - - # make sure the cursor is open - self._checkOpen(space) - - return self._execute(space, w_stmt, w_vars) - - def prepare(self, space, w_stmt, w_tag=None): - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, w_tag) - - def _execute(self, space, w_stmt, w_vars): - - # prepare the statement, if applicable - self._internalPrepare(space, w_stmt, None) - - # perform binds - if w_vars is None: - pass - elif space.isinstance_w(w_vars, space.w_dict): - self._setBindVariablesByName(space, w_vars, 1, 0, 0) - else: - 
self._setBindVariablesByPos(space, w_vars, 1, 0, 0) - self._performBind(space) - - # execute the statement - isQuery = self.statementType == roci.OCI_STMT_SELECT - if isQuery: - numIters = 0 - else: - numIters = 1 - self._internalExecute(space, numIters=numIters) - - # perform defines, if necessary - if isQuery and self.fetchVariables is None: - self._performDefine() - - # reset the values of setoutputsize() - self.outputSize = -1 - self.outputSizeColumn = -1 - - # for queries, return the cursor for convenience - if isQuery: - return space.wrap(self) - - # for all other statements, simply return None - return space.w_None - - def executemany(self, space, w_stmt, w_list_of_args): - if space.is_w(w_stmt, space.w_None): - w_stmt = None - if not space.isinstance_w(w_list_of_args, space.w_list): - raise OperationError( - space.w_TypeError, - space.wrap("list expected")) - - # make sure the cursor is open - self._checkOpen(space) - - # prepare the statement - self._internalPrepare(space, w_stmt, None) - - # queries are not supported as the result is undefined - if self.statementType == roci.OCI_STMT_SELECT: - raise OperationError( - get(space).w_NotSupportedError, - space.wrap("queries not supported: results undefined")) - - # perform binds - args_w = space.listview(w_list_of_args) - numrows = len(args_w) - for i in range(numrows): - w_arguments = args_w[i] - deferred = i < numrows - 1 - if space.isinstance_w(w_arguments, space.w_dict): - self._setBindVariablesByName( - space, w_arguments, numrows, i, deferred) - else: - self._setBindVariablesByPos( - space, w_arguments, numrows, i, deferred) - self._performBind(space) - - # execute the statement, but only if the number of rows is greater than - # zero since Oracle raises an error otherwise - if numrows > 0: - self._internalExecute(space, numIters=numrows) - - def close(self, space): - # make sure we are actually open - self._checkOpen(space) - - # close the cursor - self.freeHandle(space, raiseError=True) - - 
self.isOpen = False - self.handle = lltype.nullptr(roci.OCIStmt.TO) - - @unwrap_spec(name=str) - def callfunc(self, space, name, w_returnType, w_parameters=None): - retvar = interp_variable.newVariableByType(space, self, w_returnType, 1) - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, retvar, w_parameters) - - # determine the results - return retvar.getValue(space, 0) - - @unwrap_spec(name=str) - def callproc(self, space, name, w_parameters=None): - if space.is_none(w_parameters): - w_parameters = None - - self._call(space, name, None, w_parameters) - - # create the return value - ret_w = [] - if self.bindList: - for v in self.bindList: - assert isinstance(v, interp_variable.W_Variable) - ret_w.append(v.getValue(space, 0)) - return space.newlist(ret_w) - - def _call(self, space, name, retvar, w_args): - # determine the number of arguments passed - if w_args: - numArguments = space.len_w(w_args) - else: - numArguments = 0 - - # make sure we are actually open - self._checkOpen(space) - - # add the return value, if applicable - if retvar: - offset = 1 - w_vars = space.newlist([retvar]) - if w_args: - space.call_method(w_vars, "extend", w_args) - else: - offset = 0 - w_vars = w_args - - # build up the statement - args = ', '.join([':%d' % (i + offset + 1,) - for i in range(numArguments)]) - if retvar: - stmt = "begin :1 := %s(%s); end;" % (name, args) - else: - stmt = "begin %s(%s); end;" % (name, args) - - self._execute(space, space.wrap(stmt), w_vars) - - def _checkOpen(self, space): - if not self.isOpen: - raise OperationError( - interp_error.get(space).w_InterfaceError, - space.wrap("not open")) - - def allocateHandle(self): - handleptr = lltype.malloc(rffi.CArrayPtr(roci.OCIStmt).TO, - 1, flavor='raw') - try: - status = roci.OCIHandleAlloc( - self.environment.handle, - handleptr, roci.OCI_HTYPE_STMT, 0, - lltype.nullptr(rffi.CArray(roci.dvoidp))) - self.environment.checkForError( - status, "Cursor_New()") - self.handle = 
handleptr[0] - finally: - lltype.free(handleptr, flavor='raw') - self.isOwned = True - - def freeHandle(self, space, raiseError=True): - if not self.handle: - return - if self.isOwned: - roci.OCIHandleFree(self.handle, roci.OCI_HTYPE_STMT) - elif self.connection.handle: - tagBuffer = StringBuffer() - tagBuffer.fill(space, self.w_statementTag) - try: - status = roci.OCIStmtRelease( - self.handle, self.environment.errorHandle, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_DEFAULT) - self.environment.checkForError( - status, "Cursor_FreeHandle()") - finally: - tagBuffer.clear() - - def _internalPrepare(self, space, w_stmt, w_tag): - # make sure we don't get a situation where nothing is to be executed - if w_stmt is None and self.w_statement is None: - raise OperationError( - interp_error.get(space).w_ProgrammingError, - space.wrap("no statement specified " - "and no prior statement prepared")) - - # nothing to do if the statement is identical to the one already stored - # but go ahead and prepare anyway for create, alter and drop statments - if w_stmt is None or w_stmt == self.w_statement: - if self.statementType not in (roci.OCI_STMT_CREATE, - roci.OCI_STMT_DROP, - roci.OCI_STMT_ALTER): - return - w_stmt = self.w_statement - else: - self.w_statement = w_stmt - - # release existing statement, if necessary - self.w_statementTag = w_tag - self.freeHandle(space) - - # prepare statement - self.isOwned = False - handleptr = lltype.malloc(roci.Ptr(roci.OCIStmt).TO, - 1, flavor='raw') - stmtBuffer = StringBuffer() - tagBuffer = StringBuffer() - stmtBuffer.fill(space, w_stmt) - tagBuffer.fill(space, w_tag) - try: - status = roci.OCIStmtPrepare2( - self.connection.handle, handleptr, - self.environment.errorHandle, - stmtBuffer.ptr, stmtBuffer.size, - tagBuffer.ptr, tagBuffer.size, - roci.OCI_NTV_SYNTAX, roci.OCI_DEFAULT) - - self.environment.checkForError( - status, "Connection_InternalPrepare(): prepare") - self.handle = handleptr[0] - finally: - lltype.free(handleptr, 
flavor='raw') - stmtBuffer.clear() - tagBuffer.clear() - - # clear bind variables, if applicable - if not self.setInputSizes: - self.bindList = None - self.bindDict = None - - # clear row factory, if applicable - self.rowFactory = None - - # determine if statement is a query - self._getStatementType() - - def _setErrorOffset(self, space, e): - if e.match(space, get(space).w_DatabaseError): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, flavor='raw') - try: - roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARSE_ERROR_OFFSET, - self.environment.errorHandle) - e.offset = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - def _internalExecute(self, space, numIters): - if self.connection.autocommit: - mode = roci.OCI_COMMIT_ON_SUCCESS - else: - mode = roci.OCI_DEFAULT - - status = roci.OCIStmtExecute( - self.connection.handle, - self.handle, - self.environment.errorHandle, - numIters, 0, - lltype.nullptr(roci.OCISnapshot.TO), - lltype.nullptr(roci.OCISnapshot.TO), - mode) - try: - self.environment.checkForError( - status, "Cursor_InternalExecute()") - except OperationError, e: - self._setErrorOffset(space, e) - raise - finally: - self._setRowCount() - - def _getStatementType(self): - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_STMT_TYPE, - self.environment.errorHandle) - - self.environment.checkForError( - status, "Cursor_GetStatementType()") - self.statementType = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - self.fetchVariables = None - - def getDescription(self, space): - "Return a list of 7-tuples consisting of the description of " - "the define variables" - - # make sure the cursor is open - self._checkOpen(space) - - 
# fixup bound cursor, if necessary - self._fixupBoundCursor() - - # if not a query, return None - if self.statementType != roci.OCI_STMT_SELECT: - return - - # determine number of items in select-list - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, flavor='raw') - try: - status = roci.OCIAttrGet( - self.handle, roci.OCI_HTYPE_STMT, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PARAM_COUNT, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_GetDescription()") - numItems = attrptr[0] - finally: - lltype.free(attrptr, flavor='raw') - - return space.newlist( - [space.newtuple(self._itemDescription(space, i + 1)) - for i in range(numItems)]) - - def _itemDescription(self, space, pos): - "Return a tuple describing the item at the given position" - - # acquire parameter descriptor - paramptr = lltype.malloc(roci.Ptr(roci.OCIParam).TO, - 1, flavor='raw') - try: - status = roci.OCIParamGet( - self.handle, roci.OCI_HTYPE_STMT, - self.environment.errorHandle, - rffi.cast(roci.dvoidpp, paramptr), - pos) - self.environment.checkForError( - status, - "Cursor_GetDescription(): parameter") - param = paramptr[0] - finally: - lltype.free(paramptr, flavor='raw') - - try: - # acquire usable type of item - varType = interp_variable.typeByOracleDescriptor( - param, self.environment) - - # acquire internal size of item - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_DATA_SIZE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): internal size") - internalSize = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - # acquire name of item - nameptr = lltype.malloc(rffi.CArrayPtr(roci.oratext).TO, 1, - flavor='raw') - lenptr = 
lltype.malloc(rffi.CArrayPtr(roci.ub4).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, nameptr), - lenptr, - roci.OCI_ATTR_NAME, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): name") - name = rffi.charpsize2str(nameptr[0], rffi.cast(lltype.Signed, lenptr[0])) - finally: - lltype.free(nameptr, flavor='raw') - lltype.free(lenptr, flavor='raw') - - # lookup precision and scale - if varType is interp_variable.VT_Float: - attrptr = lltype.malloc(rffi.CArrayPtr(roci.sb1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_SCALE, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): scale") - scale = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub2).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_PRECISION, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): precision") - precision = rffi.cast(lltype.Signed, attrptr[0]) - finally: - lltype.free(attrptr, flavor='raw') - else: - scale = 0 - precision = 0 - - # lookup whether null is permitted for the attribute - attrptr = lltype.malloc(rffi.CArrayPtr(roci.ub1).TO, 1, - flavor='raw') - try: - status = roci.OCIAttrGet( - param, roci.OCI_HTYPE_DESCRIBE, - rffi.cast(roci.dvoidp, attrptr), - lltype.nullptr(roci.Ptr(roci.ub4).TO), - roci.OCI_ATTR_IS_NULL, - self.environment.errorHandle) - self.environment.checkForError( - status, - "Cursor_ItemDescription(): nullable") - nullable = rffi.cast(lltype.Signed, attrptr[0]) != 0 - finally: - 
lltype.free(attrptr, flavor='raw') - - # set display size based on data type - if varType is interp_variable.VT_String: - displaySize = internalSize - elif varType is interp_variable.VT_NationalCharString: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Binary: - displaySize = internalSize - elif varType is interp_variable.VT_FixedChar: - displaySize = internalSize - elif varType is interp_variable.VT_FixedNationalChar: - displaySize = internalSize / 2 - elif varType is interp_variable.VT_Float: - if precision: - displaySize = precision + 1 - if scale > 0: - displaySize += scale + 1 - else: - displaySize = 127 - elif varType is interp_variable.VT_DateTime: - displaySize = 23 - else: - displaySize = -1 - - # return the tuple - return [space.wrap(name), space.gettypeobject(varType.typedef), - space.wrap(displaySize), space.wrap(internalSize), - space.wrap(precision), space.wrap(scale), - space.wrap(nullable)] - - finally: - roci.OCIDescriptorFree(param, roci.OCI_DTYPE_PARAM) - - def _setBindVariablesByPos(self, space, - w_vars, numElements, arrayPos, defer): - "handle positional binds" - # make sure positional and named binds are not being intermixed - if self.bindDict is not None: - raise OperationError( - get(space).w_ProgrammingError, - space.wrap("positional and named binds cannot be intermixed")) - - if self.bindList is None: - self.bindList = [] - - vars_w = space.fixedview(w_vars) - for i in range(len(vars_w)): - w_value = vars_w[i] - if i < len(self.bindList): - origVar = self.bindList[i] - if space.is_w(origVar, space.w_None): - origVar = None - else: - origVar = None - newVar = self._setBindVariableHelper(space, w_value, origVar, - numElements, arrayPos, defer) - if newVar: - if i < len(self.bindList): - self.bindList[i] = newVar - else: - assert i == len(self.bindList) - self.bindList.append(newVar) - - def _setBindVariablesByName(self, space, - w_vars, numElements, arrayPos, defer): - "handle named binds" - # make sure positional and 
named binds are not being intermixed From noreply at buildbot.pypy.org Thu May 1 09:26:20 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:20 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix translator error Message-ID: <20140501072620.E9F801C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71125:858c48e2bc4d Date: 2014-04-30 23:51 -0700 http://bitbucket.org/pypy/pypy/changeset/858c48e2bc4d/ Log: fix translator error diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -498,8 +498,8 @@ collection of (possibly) overloaded methods or functions. It calls these in order and deals with error handling and reporting.""" - _attrs_ = ['space', 'functions'] - _immutable_fields_ = ['functions[*]'] + _attrs_ = ['space', 'scope', 'functions'] + _immutable_fields_ = ['scope', 'functions[*]'] def __init__(self, space, declaring_scope, functions): self.space = space From noreply at buildbot.pypy.org Thu May 1 09:26:22 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 1 May 2014 09:26:22 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support into default: this opens several more tests based on the dummy backend Message-ID: <20140501072622.1997A1C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r71126:d6759a0aff01 Date: 2014-05-01 00:25 -0700 http://bitbucket.org/pypy/pypy/changeset/d6759a0aff01/ Log: merge reflex-support into default: this opens several more tests based on the dummy backend diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -16,7 +16,7 @@ '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', - 'CPPInstance' : 'interp_cppyy.W_CPPInstance', + 
'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', } @@ -25,7 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', - 'Template' : 'pythonify.CppyyTemplateType', + 'Template' : 'pythonify.CPPTemplate', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -127,19 +127,18 @@ argc = len(args_w) try: - # Note: argcount is +1 for the class (== w_self) - if argc < 5 or 6 < argc: + if argc < 4 or 5 < argc: raise TypeError("wrong number of arguments") - # second argument must be a name - funcname = space.str_w(args_w[1]) + # first argument must be a name + funcname = space.str_w(args_w[0]) # last (optional) argument is number of parameters npar = 0 - if argc == 6: npar = space.int_w(args_w[5]) + if argc == 5: npar = space.int_w(args_w[4]) - # third argument must be a callable python object - w_callable = args_w[2] + # second argument must be a callable python object + w_callable = args_w[1] if not space.is_true(space.callable(w_callable)): raise TypeError("2nd argument is not a valid python callable") @@ -159,17 +158,21 @@ # so far, so good; leaves on issue: CINT is expecting a wrapper, but # we need the overload that takes a function pointer, which is not in # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, - space.float_w(args_w[3]), space.float_w(args_w[4]), npar) - - from pypy.module.cppyy import interp_cppyy - w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, - do_cast=False, python_owns=True, fresh=True) + newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, + space.float_w(args_w[2]), space.float_w(args_w[3]), npar) + + # w_self is a null-ptr bound as 
TF1 + from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator + cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) + cppself._rawobject = newinst + memory_regulator.register(cppself) # tie all the life times to the TF1 instance - space.setattr(w_instance, space.wrap('_callback'), w_callback) + space.setattr(w_self, space.wrap('_callback'), w_callback) - return w_instance + # by definition for __init__ + return None + except (OperationError, TypeError, IndexError), e: newargs_w = args_w[1:] # drop class @@ -312,7 +315,7 @@ # location w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.buffer_w(w_address) + buf = space.getarg_w('s*', w_address) from pypy.module._rawffi import buffer assert isinstance(buf, buffer.RawFFIBuffer) address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) @@ -395,7 +398,7 @@ _method_alias(space, w_pycppclass, "__len__", "GetSize") elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -155,18 +155,16 @@ the memory_regulator.""" _attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required', - 'args_expected', 'converters', 'executor', '_funcaddr', 'cif_descr', - 'uses_local'] + 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True - def __init__(self, space, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, declaring_scope, method_index, arg_defs, args_required): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.index = method_index self.cppmethod = capi.c_get_method(self.space, self.scope, 
method_index) self.arg_defs = arg_defs self.args_required = args_required - self.args_expected = len(arg_defs) # Setup of the method dispatch's innards is done lazily, i.e. only when # the method is actually used. @@ -176,6 +174,12 @@ self._funcaddr = lltype.nullptr(rffi.VOIDP.TO) self.uses_local = False + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + return cppinstance.get_cppthis(declaring_scope) + def _address_from_local_buffer(self, call_local, idx): if not call_local: return call_local @@ -277,7 +281,7 @@ funcaddr = methgetter(rffi.cast(capi.C_OBJECT, cppthis)) self._funcaddr = rffi.cast(rffi.VOIDP, funcaddr) - nargs = self.args_expected + 1 # +1: cppthis + nargs = len(self.arg_defs) + 1 # +1: cppthis # memory block for CIF description (note: not tracked as the life # time of methods is normally the duration of the application) @@ -335,7 +339,7 @@ # extra cif_descr.abi = clibffi.FFI_DEFAULT_ABI - cif_descr.nargs = self.args_expected + 1 # +1: cppthis + cif_descr.nargs = len(self.arg_defs) + 1 # +1: cppthis res = jit_libffi.jit_ffi_prep_cif(cif_descr) if res != clibffi.FFI_OK: @@ -405,28 +409,29 @@ class CPPFunction(CPPMethod): - """Global (namespaced) function dispatcher. For now, the base class has - all the needed functionality, by allowing the C++ this pointer to be null - in the call. An optimization is expected there, however.""" + """Global (namespaced) function dispatcher.""" _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return capi.C_NULL_OBJECT + def __repr__(self): return "CPPFunction: %s" % self.signature() class CPPTemplatedCall(CPPMethod): - """Method dispatcher that first needs to resolve the template instance. 
- Note that the derivation is from object: the CPPMethod is a data member.""" + """Method dispatcher that first resolves the template instance.""" - _attrs_ = ['space', 'templ_args', 'method'] + _attrs_ = ['space', 'templ_args'] _immutable_ = True - def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required): self.space = space self.templ_args = templ_args # TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here - CPPMethod.__init__(self, space, containing_scope, method_index, arg_defs, args_required) + CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required) def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -456,24 +461,15 @@ _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return rffi.cast(capi.C_OBJECT, declaring_scope.handle) + def call(self, cppthis, args_w): - # TODO: these casts are very, very un-pretty; need to find a way of - # re-using CPPMethod's features w/o these roundabouts - vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - cppinstance = None - try: - cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) - use_args_w = args_w[1:] - except (OperationError, TypeError), e: - use_args_w = args_w - w_result = CPPMethod.call(self, vscope, use_args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - if cppinstance: - cppinstance._rawobject = newthis - memory_regulator.register(cppinstance) - return args_w[0] - return wrap_cppobject(self.space, newthis, self.scope, - do_cast=False, python_owns=True, fresh=True) + # Note: this does not return a wrapped instance, just a pointer to the + # new instance; the overload must still wrap it before returning. Also, + # cppthis is declaring_scope.handle (as per unpack_cppthis(), above). 
+ return CPPMethod.call(self, cppthis, args_w) def __repr__(self): return "CPPConstructor: %s" % self.signature() @@ -505,9 +501,10 @@ _attrs_ = ['space', 'scope', 'functions'] _immutable_fields_ = ['scope', 'functions[*]'] - def __init__(self, space, containing_scope, functions): + def __init__(self, space, declaring_scope, functions): self.space = space - self.scope = containing_scope + self.scope = declaring_scope + assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -520,12 +517,10 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - if cppinstance is not None: - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - else: - cppthis = capi.C_NULL_OBJECT + # instance handling is specific to the function type only, so take it out + # of the loop over function overloads + cppthis = self.functions[0].unpack_cppthis( + self.space, w_cppinstance, self.functions[0].scope) assert lltype.typeOf(cppthis) == capi.C_OBJECT # The following code tries out each of the functions in order. 
If @@ -585,6 +580,39 @@ ) +class W_CPPConstructorOverload(W_CPPOverload): + @jit.elidable_promote() + def is_static(self): + return self.space.w_False + + @jit.elidable_promote() + def unpack_cppthis(self, w_cppinstance): + return rffi.cast(capi.C_OBJECT, self.scope.handle) + + @jit.unroll_safe + @unwrap_spec(args_w='args_w') + def call(self, w_cppinstance, args_w): + w_result = W_CPPOverload.call(self, w_cppinstance, args_w) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if cppinstance is not None: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return w_cppinstance + return wrap_cppobject(self.space, newthis, self.functions[0].scope, + do_cast=False, python_owns=True, fresh=True) + + def __repr__(self): + return "W_CPPConstructorOverload(%s)" % [f.signature() for f in self.functions] + +W_CPPConstructorOverload.typedef = TypeDef( + 'CPPConstructorOverload', + is_static = interp2app(W_CPPConstructorOverload.is_static), + call = interp2app(W_CPPConstructorOverload.call), + signature = interp2app(W_CPPOverload.signature), +) + + class W_CPPBoundMethod(W_Root): _attrs_ = ['cppthis', 'method'] @@ -605,9 +633,9 @@ _attrs_ = ['space', 'scope', 'converter', 'offset'] _immutable_fields = ['scope', 'converter', 'offset'] - def __init__(self, space, containing_scope, type_name, offset): + def __init__(self, space, declaring_scope, type_name, offset): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.converter = converter.get_converter(self.space, type_name, '') self.offset = offset @@ -717,7 +745,10 @@ # create the overload methods from the method sets for pyname, methods in methods_temp.iteritems(): CPPMethodSort(methods).sort() - overload = W_CPPOverload(self.space, self, methods[:]) + if pyname == self.name: + overload = W_CPPConstructorOverload(self.space, self, methods[:]) + else: + overload = 
W_CPPOverload(self.space, self, methods[:]) self.methods[pyname] = overload def full_name(self): @@ -857,14 +888,13 @@ class W_CPPClass(W_CPPScope): - _attrs_ = ['space', 'default_constructor', 'name', 'handle', 'methods', 'datamembers'] - _immutable_fields_ = ['kind', 'default_constructor', 'methods[*]', 'datamembers[*]'] + _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] + _immutable_fields_ = ['kind', 'constructor', 'methods[*]', 'datamembers[*]'] kind = "class" def __init__(self, space, name, opaque_handle): W_CPPScope.__init__(self, space, name, opaque_handle) - self.default_constructor = None def _make_cppfunction(self, pyname, index): num_args = capi.c_method_num_args(self.space, self, index) @@ -876,8 +906,6 @@ arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.space, self, index): cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required) - if args_required == 0: - self.default_constructor = cppfunction elif capi.c_method_is_template(self.space, self, index): templ_args = capi.c_template_args(self.space, self, index) cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required) @@ -905,9 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - if self.default_constructor is not None: - return self.default_constructor.call(capi.C_NULL_OBJECT, []) - raise self.missing_attribute_error("default_constructor") + return self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) @@ -1046,6 +1072,16 @@ raise return None + def instance__init__(self, args_w): + try: + constructor_overload = self.cppclass.get_overload(self.cppclass.name) + constructor_overload.call(self, args_w) + except OperationError, e: + if not e.match(self.space, self.space.w_AttributeError): + raise + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name)) + def 
instance__eq__(self, w_other): # special case: if other is None, compare pointer-style if self.space.is_w(w_other, self.space.w_None): @@ -1128,6 +1164,7 @@ 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), _python_owns = GetSetProperty(W_CPPInstance.fget_python_owns, W_CPPInstance.fset_python_owns), + __init__ = interp2app(W_CPPInstance.instance__init__), __eq__ = interp2app(W_CPPInstance.instance__eq__), __ne__ = interp2app(W_CPPInstance.instance__ne__), __nonzero__ = interp2app(W_CPPInstance.instance__nonzero__), diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -7,7 +7,7 @@ # with info from multiple dictionaries and do not need to bother with meta # classes for inheritance. Both are python classes, though, and refactoring # may be in order at some point. -class CppyyScopeMeta(type): +class CPPScope(type): def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self @@ -15,16 +15,16 @@ raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) -class CppyyNamespaceMeta(CppyyScopeMeta): +class CPPNamespace(CPPScope): def __dir__(cls): return cls._cpp_proxy.__dir__() -class CppyyClassMeta(CppyyScopeMeta): +class CPPClass(CPPScope): pass -# class CppyyClass defined in _init_pythonify() +# class CPPInstance defined in _init_pythonify() -class CppyyTemplateType(object): +class CPPTemplate(object): def __init__(self, name, scope=None): self._name = name if scope is None: @@ -91,7 +91,7 @@ # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) - metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + metans = type(CPPNamespace)(namespace_name+'_meta', (CPPNamespace,), {}) if cppns: d = {"_cpp_proxy" : cppns} @@ -137,21 +137,14 @@ break return tuple(bases) -def 
make_new(class_name, cppclass): - try: - constructor_overload = cppclass.get_overload(cppclass.type_name) - except AttributeError: - msg = "cannot instantiate abstract class '%s'" % class_name - def __new__(cls, *args): - raise TypeError(msg) - else: - def __new__(cls, *args): - # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) - if not instance.__class__ is cls: - instance.__class__ = cls # happens for derived class - return instance +def make_new(class_name): + def __new__(cls, *args): + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -159,7 +152,7 @@ # get a list of base classes for class creation bases = [get_pycppclass(base) for base in cppclass.get_base_names()] if not bases: - bases = [CppyyClass,] + bases = [CPPInstance,] else: # it's technically possible that the required class now has been built # if one of the base classes uses it in e.g. 
a function interface @@ -170,7 +163,7 @@ # create a meta class to allow properties (for static data write access) metabases = [type(base) for base in bases] - metacpp = type(CppyyClassMeta)(class_name+'_meta', _drop_cycles(metabases), {}) + metacpp = type(CPPClass)(class_name+'_meta', _drop_cycles(metabases), {}) # create the python-side C++ class representation def dispatch(self, name, signature): @@ -178,7 +171,7 @@ return types.MethodType(make_method(name, cppol), self, type(self)) d = {"_cpp_proxy" : cppclass, "__dispatch__" : dispatch, - "__new__" : make_new(class_name, cppclass), + "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) @@ -214,7 +207,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(template_name, scope) + return CPPTemplate(template_name, scope) def get_pycppitem(scope, name): @@ -426,15 +419,12 @@ # at pypy-c startup, rather than on the "import cppyy" statement import cppyy - # top-level classes - global CppyyClass - class CppyyClass(cppyy.CPPInstance): - __metaclass__ = CppyyClassMeta - - def __init__(self, *args, **kwds): - # self is only a placeholder; now create the actual C++ object - args = (self,) + args - self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) + # root of all proxy classes: CPPInstance in pythonify exists to combine the + # CPPClass meta class with the interp-level CPPInstanceBase + global CPPInstance + class CPPInstance(cppyy.CPPInstanceBase): + __metaclass__ = CPPClass + pass # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -4,17 +4,22 @@ #include #include #include +#include #include #include +#include #include #include +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + // add example01.cxx code int 
globalAddOneToInt(int a); namespace dummy { #include "example01.cxx" +#include "datatypes.cxx" } int globalAddOneToInt(int a) { @@ -27,168 +32,307 @@ typedef std::map Handles_t; static Handles_t s_handles; +enum EMethodType { kNormal=0, kConstructor=1, kStatic=2 }; + struct Cppyy_PseudoMethodInfo { Cppyy_PseudoMethodInfo(const std::string& name, const std::vector& argtypes, - const std::string& returntype) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} + const std::string& returntype, + EMethodType mtype = kNormal) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; std::string m_returntype; + EMethodType m_type; +}; + +struct Cppyy_PseudoDatambrInfo { + Cppyy_PseudoDatambrInfo(const std::string& name, + const std::string& type, + size_t offset, bool isstatic) : + m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} + + std::string m_name; + std::string m_type; + size_t m_offset; + bool m_isstatic; }; struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} - Cppyy_PseudoClassInfo(const std::vector& methods) : - m_methods(methods ) {} + Cppyy_PseudoClassInfo(const std::vector& methods, + long method_offset, + const std::vector& data) : + m_methods(methods), m_method_offset(method_offset), m_datambrs(data) {} std::vector m_methods; + long m_method_offset; + std::vector m_datambrs; }; typedef std::map Scopes_t; static Scopes_t s_scopes; -static int example01_last_static_method = 0; -static int example01_last_constructor = 0; -static int payload_methods_offset = 0; +static std::map s_methods; + +#define PUBLIC_CPPYY_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ + offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname, argtypes, #dmtype)); \ + s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + 
argtypes.push_back(#dmtype); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname, argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + argtypes.clear(); \ + argtypes.push_back("const "#dmtype"&"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname"_c", argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ + PUBLIC_CPPYY_DATA(dmname, dmtype); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array2", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ + PUBLIC_CPPYY_DATA2(dmname, dmtype); \ + argtypes.push_back(#dmtype"*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + argtypes.clear(); argtypes.push_back("void*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_void_array_"#key, argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + +#define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ + (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static long s_scope_id = 0; + static long s_scope_id = 0; + static long s_method_id = 0; 
{ // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // ( 0) static double staticAddToDouble(double a) + // static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double", kStatic)); + s_methods["static_example01::staticAddToDouble_double"] = s_method_id++; - // ( 1) static int staticAddOneToInt(int a) - // ( 2) static int staticAddOneToInt(int a, int b) + // static int staticAddOneToInt(int a) + // static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int_int"] = s_method_id++; - // ( 3) static int staticAtoi(const char* str) + // static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int", kStatic)); + s_methods["static_example01::staticAtoi_cchar*"] = s_method_id++; - // ( 4) static char* staticStrcpy(const char* strin) - methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // static char* staticStrcpy(const char* strin) + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*", kStatic)); + s_methods["static_example01::staticStrcpy_cchar*"] = s_method_id++; - // ( 5) static 
void staticSetPayload(payload* p, double d) - // ( 6) static payload* staticCyclePayload(payload* p, double d) - // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + // static void staticSetPayload(payload* p, double d) + // static payload* staticCyclePayload(payload* p, double d) + // static payload staticCopyCyclePayload(payload* p, double d) argtypes.clear(); argtypes.push_back("payload*"); argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void", kStatic)); + s_methods["static_example01::staticSetPayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*", kStatic)); + s_methods["static_example01::staticCyclePayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload", kStatic)); + s_methods["static_example01::staticCopyCyclePayload_payload*_double"] = s_method_id++; - // ( 8) static int getCount() - // ( 9) static void setCount(int) + // static int getCount() + // static void setCount(int) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int", kStatic)); + s_methods["static_example01::getCount"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void", kStatic)); + s_methods["static_example01::setCount_int"] = s_method_id++; - // cut-off is used in cppyy_is_static - example01_last_static_method = methods.size(); + // example01() + // example01(int a) + 
argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01"] = s_method_id++; + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01_int"] = s_method_id++; - // (10) example01() - // (11) example01(int a) - argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - - // cut-off is used in cppyy_is_constructor - example01_last_constructor = methods.size(); - - // (12) int addDataToInt(int a) + // int addDataToInt(int a) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + s_methods["example01::addDataToInt_int"] = s_method_id++; - // (13) int addDataToIntConstRef(const int& a) + // int addDataToIntConstRef(const int& a) argtypes.clear(); argtypes.push_back("const int&"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + s_methods["example01::addDataToIntConstRef_cint&"] = s_method_id++; - // (14) int overloadedAddDataToInt(int a, int b) + // int overloadedAddDataToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int"] = s_method_id++; - // (15) int overloadedAddDataToInt(int a) - // (16) int overloadedAddDataToInt(int a, int b, int c) + // int overloadedAddDataToInt(int a) + // int overloadedAddDataToInt(int a, int b, int c) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); - + s_methods["example01::overloadedAddDataToInt_int"] = s_method_id++; 
argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int_int"] = s_method_id++; - // (17) double addDataToDouble(double a) + // double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + s_methods["example01::addDataToDouble_double"] = s_method_id++; - // (18) int addDataToAtoi(const char* str) - // (19) char* addToStringValue(const char* str) + // int addDataToAtoi(const char* str) + // char* addToStringValue(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + s_methods["example01::addDataToAtoi_cchar*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + s_methods["example01::addToStringValue_cchar*"] = s_method_id++; - // (20) void setPayload(payload* p) - // (21) payload* cyclePayload(payload* p) - // (22) payload copyCyclePayload(payload* p) + // void setPayload(payload* p) + // payload* cyclePayload(payload* p) + // payload copyCyclePayload(payload* p) argtypes.clear(); argtypes.push_back("payload*"); methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + s_methods["example01::setPayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + s_methods["example01::cyclePayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; - payload_methods_offset = methods.size(); - - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class example01 + 
//==================================================================== + { // class payload -- s_handles["payload"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // (23) payload(double d = 0.) + // payload(double d = 0.) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor", kConstructor)); + s_methods["payload::payload_double"] = s_method_id++; - // (24) double getData() + // double getData() argtypes.clear(); methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + s_methods["payload::getData"] = s_method_id++; - // (25) void setData(double d) + // void setData(double d) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + s_methods["payload::setData_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class payload + + //==================================================================== + + { // class cppyy_test_data -- + s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // cppyy_test_data() + std::vector argtypes; + methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); + s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + + methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); + s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + + std::vector data; + PUBLIC_CPPYY_DATA2(bool, bool); + PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (uchar, unsigned char); + PUBLIC_CPPYY_DATA3(short, short, h); + PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); + PUBLIC_CPPYY_DATA3(int, int, i); + PUBLIC_CPPYY_DATA3(uint, unsigned int, I); + 
PUBLIC_CPPYY_DATA3(long, long, l); + PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); + PUBLIC_CPPYY_DATA (llong, long long); + PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA3(float, float, f); + PUBLIC_CPPYY_DATA3(double, double, d); + PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (voidp, void*); + + PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); + PUBLIC_CPPYY_STATIC_DATA(short, short); + PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); + PUBLIC_CPPYY_STATIC_DATA(int, int); + PUBLIC_CPPYY_STATIC_DATA(uint, unsigned int); + PUBLIC_CPPYY_STATIC_DATA(long, long); + PUBLIC_CPPYY_STATIC_DATA(ulong, unsigned long); + PUBLIC_CPPYY_STATIC_DATA(llong, long long); + PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); + PUBLIC_CPPYY_STATIC_DATA(float, float); + PUBLIC_CPPYY_STATIC_DATA(double, double); + PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class cppyy_test_data + } } _init; @@ -230,155 +374,387 @@ /* method/function dispatching -------------------------------------------- */ void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { - switch ((long)method) { - case 5: // static void example01:;staticSetPayload(payload* p, double d) + long idx = (long)method; + if (idx == s_methods["static_example01::staticSetPayload_payload*_double"]) { assert(!self && nargs == 2); dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 9: // static void example01::setCount(int) + } else if (idx == s_methods["static_example01::setCount_int"]) { assert(!self && nargs == 1); dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); - break; - case 20: // void example01::setPayload(payload* p); + } else 
if (idx == s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + assert(self && nargs == 0); + ((dummy::cppyy_test_data*)self)->destroy_arrays(); + } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_char"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["cppyy_test_data::set_short"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_int"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == 
s_methods["cppyy_test_data::set_uint"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_long"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_float"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == 
s_methods["cppyy_test_data::set_float_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_double"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + } else { assert(!"method unknown in cppyy_call_v"); - break; } } +unsigned char cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + unsigned char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_bool"]) { + assert(self && nargs == 0); + result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + } else { + assert(!"method unknown in cppyy_call_b"); + } + return result; +} + +char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_char"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_char(); + } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + assert(self && nargs == 0); + result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + } else { + assert(!"method unknown in cppyy_call_c"); + } + return result; +} + +short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + short result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_short"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_short(); + } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + assert(self && nargs == 0); + result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + } else { + assert(!"method unknown in 
cppyy_call_h"); + } + return result; +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; - switch ((long)method) { - case 1: // static int example01::staticAddOneToInt(int) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddOneToInt_int"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 2: // static int example01::staticAddOneToInt(int, int) + } else if (idx == s_methods["static_example01::staticAddOneToInt_int_int"]) { assert(!self && nargs == 2); result = dummy::example01::staticAddOneToInt( ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); - break; - case 3: // static int example01::staticAtoi(const char* str) + } else if (idx == s_methods["static_example01::staticAtoi_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 8: // static int example01::getCount() + } else if (idx == s_methods["static_example01::getCount"]) { assert(!self && nargs == 0); result = dummy::example01::getCount(); - break; - case 12: // int example01::addDataToInt(int a) + } else if (idx == s_methods["example01::addDataToInt_int"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 18: // int example01::addDataToAtoi(const char* str) + } else if (idx == s_methods["example01::addDataToAtoi_cchar*"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_int"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_int(); + } else { assert(!"method unknown in cppyy_call_i"); - break; } return result; } long cppyy_call_l(cppyy_method_t method, cppyy_object_t 
self, int nargs, void* args) { long result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = (long)dummy::example01::staticStrcpy( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + } else if (idx == s_methods["static_example01::staticCyclePayload_payload*_double"]) { assert(!self && nargs == 2); result = (long)dummy::example01::staticCyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 19: // char* example01::addToStringValue(const char* str) + } else if (idx == s_methods["example01::addToStringValue_cchar*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->addToStringValue( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 21: // payload* example01::cyclePayload(payload* p) + } else if (idx == s_methods["example01::cyclePayload_payload*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_uint"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint(); + } else if (idx == s_methods["cppyy_test_data::get_long"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_long(); + } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) 
{ + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); + } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array(); + } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); + } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); + } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); + } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); + } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); + } else if (idx == 
s_methods["cppyy_test_data::get_ulong_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); + } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_h"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + assert(self && nargs == 1); + result = 
(long)((dummy::cppyy_test_data*)self)->pass_array( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(double**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + (*(double**)&((CPPYY_G__value*)args)[0])); + } else { assert(!"method unknown in cppyy_call_l"); - break; } return result; } +long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + long long result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_llong"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_llong(); + } else if (idx == 
s_methods["cppyy_test_data::get_ullong"]) { + assert(self && nargs == 0); + result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + } else { + assert(!"method unknown in cppyy_call_ll"); + } + return result; +} + +float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + float result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_float"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_float(); + } else { + assert(!"method unknown in cppyy_call_f"); + } + return result; +} + double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; - switch ((long)method) { - case 0: // static double example01::staticAddToDouble(double) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddToDouble_double"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 17: // double example01::addDataToDouble(double a) + } else if (idx == s_methods["example01::addDataToDouble_double"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 24: // double payload::getData() + } else if (idx == s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_double"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_double(); + } else { assert(!"method unknown in cppyy_call_d"); - break; } return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char* result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { 
assert(!self && nargs == 1); result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else { assert(!"method unknown in cppyy_call_s"); - break; } return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { void* result = 0; - if (handle == s_handles["example01"]) { - switch ((long)method) { - case 10: - assert(nargs == 0); - result = new dummy::example01; - break; - case 11: - assert(nargs == 1); - result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); - break; - default: - assert(!"method of example01 unknown in cppyy_constructor"); - break; - } - } else if (handle == s_handles["payload"]) { - switch ((long)method) { - case 23: - if (nargs == 0) result = new dummy::payload; - else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); - break; - default: - assert(!"method payload unknown in cppyy_constructor"); - break; - } + const long idx = (long)method; + if (idx == s_methods["example01::example01"]) { + assert(nargs == 0); + result = new dummy::example01; + } else if (idx == s_methods["example01::example01_int"]) { + assert(nargs == 1); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["payload::payload_double"]) { + assert(nargs == 0 || nargs == 1); + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::cppyy_test_data"]) { + assert(nargs == 0); + result = new dummy::cppyy_test_data; + } else { + assert(!"method unknown in cppyy_constructor"); } return (cppyy_object_t)result; } @@ -486,10 +862,10 @@ } cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return (cppyy_method_t)method_index; - else if (handle == s_handles["payload"]) - return 
(cppyy_method_t)((long)method_index + payload_methods_offset); + if (s_scopes.find(handle) != s_scopes.end()) { + long id = s_scopes[handle].m_method_offset + (long)method_index; + return (cppyy_method_t)id; + } assert(!"unknown class in cppyy_get_method"); return (cppyy_method_t)0; } @@ -497,26 +873,45 @@ /* method properties ----------------------------------------------------- */ int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return example01_last_static_method <= method_index - && method_index < example01_last_constructor; - else if (handle == s_handles["payload"]) - return (long)method_index == 0; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kConstructor; + assert(!"unknown class in cppyy_is_constructor"); return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return method_index < example01_last_static_method ? 
1 : 0; - if (handle == s_handles["payload"]) - return 0; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kStatic; + assert(!"unknown class in cppyy_is_staticmethod"); + return 0; +} + + +/* data member reflection information ------------------------------------- */ +int cppyy_num_datamembers(cppyy_scope_t handle) { + return s_scopes[handle].m_datambrs.size(); +} + +char* cppyy_datamember_name(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_name); +} + +char* cppyy_datamember_type(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); +} + +size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { + return s_scopes[handle].m_datambrs[idatambr].m_offset; +} + + +/* data member properties ------------------------------------------------ */ +int cppyy_is_publicdata(cppyy_scope_t handle, int idatambr) { return 1; } - -/* data member reflection information ------------------------------------- */ -int cppyy_num_datamembers(cppyy_scope_t /* handle */) { - return 0; +int cppyy_is_staticdata(cppyy_scope_t handle, int idatambr) { + return s_scopes[handle].m_datambrs[idatambr].m_isstatic; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,14 +7,18 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0] or \ - 'test_pythonify.py' in item.location[0]): + import os + tst = os.path.basename(item.location[0]) + if not tst in ('test_helper.py', 'test_cppyy.py', 'test_pythonify.py', + 'test_datatypes.py'): py.test.skip("genreflex is not installed") import re - if 'test_pythonify.py' in item.location[0] and 
\ + if tst == 'test_pythonify.py' and \ not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") + elif tst == 'test_datatypes.py' and \ + not re.search("AppTestDATATYPES.test0[1-8]", item.location[2]): + py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ b/pypy/module/cppyy/test/datatypes.h @@ -16,9 +16,9 @@ class four_vector { public: four_vector(double x, double y, double z, double t) : - m_x(x), m_y(y), m_z(z), m_t(t), m_cc_called(false) {} + m_cc_called(false), m_x(x), m_y(y), m_z(z), m_t(t) {} four_vector(const four_vector& s) : - m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t), m_cc_called(true) {} + m_cc_called(true), m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t) {} double operator[](int i) { if (i == 0) return m_x; diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/cppyy/test/fragile.h --- a/pypy/module/cppyy/test/fragile.h +++ b/pypy/module/cppyy/test/fragile.h @@ -112,4 +112,9 @@ enum E2 { kTwice=12 }; }; +class O { +public: + virtual int abstract() = 0; +}; + } // namespace fragile diff --git a/pypy/module/cppyy/test/fragile_LinkDef.h b/pypy/module/cppyy/test/fragile_LinkDef.h --- a/pypy/module/cppyy/test/fragile_LinkDef.h +++ b/pypy/module/cppyy/test/fragile_LinkDef.h @@ -23,6 +23,7 @@ #pragma link C++ class fragile::L; #pragma link C++ class fragile::M; #pragma link C++ class fragile::N; +#pragma link C++ class fragile::O; #pragma link C++ class fragile::nested1::A; #pragma link C++ class fragile::nested1::nested2::A; #pragma link C++ class fragile::nested1::nested2::nested3::A; diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -435,14 
+435,16 @@ class AppTestCINTFUNCTION: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + _pypytest_leaks = None # TODO: figure out the false positives # test the function callbacks; this does not work with Reflex, as it can # not generate functions on the fly (it might with cffi?) + @py.test.mark.dont_track_allocations("TODO: understand; initialization left-over?") def test01_global_function_callback(self): """Test callback of a python global function""" - import cppyy + import cppyy, gc TF1 = cppyy.gbl.TF1 def identity(x): @@ -460,11 +462,12 @@ assert f.Eval(0.5) == 0.5 del f # force here, to prevent leak-check complaints + gc.collect() def test02_callable_object_callback(self): """Test callback of a python callable object""" - import cppyy + import cppyy, gc TF1 = cppyy.gbl.TF1 class Linear: @@ -478,13 +481,14 @@ assert f.Eval(1.3) == 7.6 del f # force here, to prevent leak-check complaints + gc.collect() def test03_fit_with_python_gaussian(self): """Test fitting with a python global function""" # note: this function is dread-fully slow when running testing un-translated - import cppyy, math + import cppyy, gc, math TF1, TH1F = cppyy.gbl.TF1, cppyy.gbl.TH1F def pygaus(x, par): @@ -515,6 +519,7 @@ assert round(result[2] - 1., 1) == 0 # s.d. 
del f # force here, to prevent leak-check complaints + gc.collect() class AppTestSURPLUS: diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -202,6 +202,12 @@ f = fragile.fglobal assert f.__doc__ == "void fragile::fglobal(int, double, char)" + try: + o = fragile.O() # raises TypeError + assert 0 + except TypeError, e: + assert "cannot instantiate abstract class 'O'" in str(e) + def test11_dir(self): """Test __dir__ method""" diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -338,8 +338,13 @@ import cppyy example01 = cppyy.gbl.example01 + assert example01.getCount() == 0 + o = example01() assert type(o) == example01 + assert example01.getCount() == 1 + o.destruct() + assert example01.getCount() == 0 class MyClass1(example01): def myfunc(self): @@ -348,7 +353,10 @@ o = MyClass1() assert type(o) == MyClass1 assert isinstance(o, example01) + assert example01.getCount() == 1 assert o.myfunc() == 1 + o.destruct() + assert example01.getCount() == 0 class MyClass2(example01): def __init__(self, what): @@ -357,7 +365,11 @@ o = MyClass2('hi') assert type(o) == MyClass2 + assert example01.getCount() == 1 assert o.what == 'hi' + o.destruct() + + assert example01.getCount() == 0 class AppTestPYTHONIFY_UI: From noreply at buildbot.pypy.org Thu May 1 09:32:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 09:32:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix (according to -A) Message-ID: <20140501073221.DF6FF1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71127:5fc41bfef56f Date: 2014-05-01 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/5fc41bfef56f/ Log: Test and fix (according to -A) diff --git 
a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -204,20 +204,20 @@ name = '?' if class_name is None: class_name = w_type.getname(space) # if the rest doesn't work - return "{method '%s' of '%s' objects}" % (name, class_name) + return "" % (name, class_name) def create_spec_for_function(space, w_func): if w_func.w_module is not None: module = space.str_w(w_func.w_module) if module != '__builtin__': - return '{%s.%s}' % (module, w_func.name) - return '{%s}' % w_func.name + return '<%s.%s>' % (module, w_func.name) + return '<%s>' % w_func.name def create_spec_for_object(space, w_obj): class_name = space.type(w_obj).getname(space) - return "{'%s' object}" % (class_name,) + return "<'%s' object>" % (class_name,) def create_spec(space, w_arg): diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -11,6 +11,22 @@ import _lsprof assert repr(_lsprof.Profiler) == "" + def test_builtins(self): + import _lsprof + prof = _lsprof.Profiler() + lst = [] + prof.enable() + lst.append(len(lst)) + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + "", + ) + for entry in stats: + assert entry.code in expected + def test_direct(self): import _lsprof def getticks(): @@ -37,10 +53,8 @@ stats = prof.getstats() entries = {} for entry in stats: - if not hasattr(entry.code, 'co_name'): - print entry.code - else: - entries[entry.code.co_name] = entry + assert hasattr(entry.code, 'co_name') + entries[entry.code.co_name] = entry efoo = entries['foo'] assert efoo.callcount == 2 assert efoo.reccallcount == 1 From noreply at buildbot.pypy.org Thu May 1 10:39:43 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 10:39:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fine-grained locking draft 
Message-ID: <20140501083943.9FDB81C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5205:54603e04954f Date: 2014-05-01 10:19 +0200 http://bitbucket.org/pypy/extradoc/changeset/54603e04954f/ Log: fine-grained locking draft diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -198,17 +198,34 @@ \subsubsection{Fine-Grained Locking} The first obvious candidate to replace the GIL is to use multiple -locks instead of a single global lock. By refining the granularity -of the locking approach, we gain the ability to run code that does -not access the same objects in parallel. +locks instead of a single global lock. By refining the granularity of +the locking approach, we gain the ability to run code that does not +access the same objects in parallel. What we lose instead is the +simplicity of the GIL approach. With every additional lock, the +likeliness of deadlocks grows, as well as the overhead that acquiring +and releasing locks produces. +Jython\footnote{www.jython.org} is one project that implements an +interpreter for Python on the JVM\footnote{Java Virtual Machine} and +that uses fine-grained locking to correctly synchronize the +interpreter. For a language like Python, one needs quite a few, +carefully placed locks. Since there is no central location, the +complexity of the implementation is quite a bit greater compared to +using a GIL. Integrating external, non-thread-safe libraries should +however be very simple too. One could simply use one lock per library +to avoid this issue. -- support of atomic blocks?\\ -- hard to get right (deadlocks, performance, lock-granularity)\\ -- very hard to get right for a large language\\ -- hard to retro-fit, as all existing code assumes GIL semantics\\ -- (there are some semantic differences, right? 
not given perfect lock-placement, but well) -( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) +In the end, fine-grained locking can transparently replace the GIL +and therefore parallelize existing applications without any +changes. It does however not provide a better synchronization +mechanism to the application like e.g. atomic blocks. + +%% - support of atomic blocks?\\ +%% - hard to get right (deadlocks, performance, lock-granularity)\\ +%% - very hard to get right for a large language\\ +%% - hard to retro-fit, as all existing code assumes GIL semantics\\ +%% - (there are some semantic differences, right? not given perfect lock-placement, but well) +%% ( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) \subsubsection{Shared-Nothing / multiple processes} From noreply at buildbot.pypy.org Thu May 1 10:39:44 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 10:39:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: shared-nothing draft Message-ID: <20140501083944.B4C091C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5206:2ad9a98221dd Date: 2014-05-01 10:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/2ad9a98221dd/ Log: shared-nothing draft diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -229,9 +229,34 @@ \subsubsection{Shared-Nothing / multiple processes} -- often needs major restructuring of programs (explicit data exchange)\\ -- sometimes communication overhead is too large\\ -- shared memory is a problem, copies of memory are too expensive +There are also approaches that work around the GIL instead of trying +to replace it. 
If an application can be split into completely +independent parts that only very rarely need to share anything, or +only do so via an external program like a database, then it is +sensible to have one GIL per independent part. As an example of such +an approach we look at the +\emph{multiprocessing}\footnote{https://docs.python.org/2/library/multiprocessing.html} +module of Python. In essence, it uses process-forking to provide the +application with multiple interpreters that can run in parallel. +Communication is then done explicitly through pipes. + +Obviously not every application fits well into this model and its +applicability is therefore quite limited. Performance is good as +long as the application does not need to communicate a lot, because +inter-process communication is relatively expensive. Also the +implementation of this approach is very cheap since one can +actually take an unmodfied GIL-supported interpreter and run +multiple of them in parallel. That way, we also inherit the +easy integration of external libraries without any changes. +While the model of explicit communication is often seen as a +superior way to synchronize concurrent applications because +of its explicitness, it does not actually introduce a better +synchronization mechanism for applications. 
+ + +%% - often needs major restructuring of programs (explicit data exchange)\\ +%% - sometimes communication overhead is too large\\ +%% - shared memory is a problem, copies of memory are too expensive \subsubsection{Transactional Memory} \paragraph{HTM} From noreply at buildbot.pypy.org Thu May 1 11:36:04 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 11:36:04 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: TM draft Message-ID: <20140501093604.31C6D1C3441@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5207:89a404ccd022 Date: 2014-05-01 11:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/89a404ccd022/ Log: TM draft diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -134,11 +134,14 @@ multi-threading in the interpreter. The basic guarantee is that the GIL may only be released in-between bytecode instructions. The interpreter can thus rely on complete isolation and atomicity of these -instructions. As a consequence, applications can rely on certain -operations to be atomic. While this is probably not a good idea, -it is used in practice. A solution replacing the GIL should therefore -uphold these guarantees, while preferably also be as easily -implementable as a GIL for the interpreter. +instructions. Additionally, it provides the application with a +sequential consistency model. As a consequence, applications can rely +on certain operations to be atomic and that they will always be +executed in the order in which they appear in the code. While +depending on this may not always be a good idea, it is done in +practice. A solution replacing the GIL should therefore uphold these +guarantees, while preferably also be as easily implementable as a GIL +for the interpreter. 
[xxx mention that the interpreter is typically very large and maintained by open-source communities] @@ -253,24 +256,86 @@ of its explicitness, it does not actually introduce a better synchronization mechanism for applications. - %% - often needs major restructuring of programs (explicit data exchange)\\ %% - sometimes communication overhead is too large\\ %% - shared memory is a problem, copies of memory are too expensive + \subsubsection{Transactional Memory} +Transactional memory (TM) can be used as a direct replacement for a +single global lock. Transactions provide the same atomicity and +isolation guarantees as the GIL provides for the execution of bytecode +instructions. So instead of acquiring and releasing the GIL between +these instructions, this approach runs the protected instructions +inside transactions. + +TM can be implemented in software (STM) or in hardware (HTM. There are +also some hybrid approaches that combine the two. We count these +hybrid approaches as STM, since they usually provide the same +capabilities as software-only approaches but with different +performance characteristics. We will now first look at HTM that +recently gained a lot of popularity by its introduction in common +desktop CPUs from Intel (Haswell generation). + \paragraph{HTM} -- false-sharing on cache-line level\\ -- limited capacity (caches, undocumented)\\ -- random aborts (haswell)\\ -- generally: transaction-length limited (no atomic blocks) +HTM provides us with transactions like any TM system does. It can +be used as a direct replacement for the GIL. However, as is common +with hardware-only solutions, there are quite a few limitations +that can not be lifted easily. For this comparison, we look at +the implementation of Intel in recent Haswell generation CPUs. + +HTM in these CPUs works on the level of caches. 
This has a few +consequences like false-sharing on the cache-line level, and most +importantly it limits the amount of memory that can be accessed within +a transaction. This transaction-length limitation makes it necessary +to have a fallback in place in case this limit is reached. In recent +attempts, the usual fallback is the GIL (XXX: cite). The current +generation of HTM hits this limit very often for our use case (XXX: +cite ruby GIL paper) and therefore does not parallelize that well. + +The performance of HTM is pretty good (XXX: cite again...) as it does +not introduce much overhead. And it can transparently parallelize +existing applications to some degree. The implementation is very +straight-forward because it directly replaces the GIL in a central +place. HTM is also directly compatible with any external library that +needs to be integrated and synchronized for use in multiple +threads. The one thing that is missing is support for a better +synchronization mechanism for the application. It is not possible +in general to expose the hardware-transactions to the application +in the form of atomic blocks because that would require much +longer transactions. + +%% - false-sharing on cache-line level\\ +%% - limited capacity (caches, undocumented)\\ +%% - random aborts (haswell)\\ +%% - generally: transaction-length limited (no atomic blocks) \paragraph{STM} -- overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) -(FastLane: low overhead, not much gain)\\ -- unlimited transaction length (easy atomic blocks) +STM provides all the same benefits as HTM except for its performance. +It is not unusual for the overhead introduced by STM to be between +100\% to even 1000\%. While STM systems often scale very well to a big +number of threads and eventually overtake the single-threaded +execution, they often provide no benefits at all for low numbers of +threads (1-8). 
There are some attempts (XXX: cite fastlane) that can +reduce the overhead a lot, but also scale very badly so that their +benefit on more than one thread is little. + +However, STM compared to HTM does not suffer from the same restricting +limitations. Transactions can be arbitrarily long. This makes it +possible to actually expose transactions to the application in the +form of atomic blocks. This is the only approach that enables a better +synchronization mechanism than locks for applications \emph{and} still +parallelizes when using it. We think this is a very important point +because it not only gives dynamic languages the ability to parallelize +(already commonplace in most other languages), but also pushes +parallel programming forward. Together with sequential consistency it +provides a lot of simplification for parallel applications. + +%% - overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) +%% (FastLane: low overhead, not much gain)\\ +%% - unlimited transaction length (easy atomic blocks) \section{The Way Forward} possible solution:\\ @@ -304,9 +369,3 @@ \end{document} -% Revision History -% -------- ------- -% Date Person Ver. 
Change -% ---- ------ ---- ------ - -% 2013.06.29 TU 0.1--4 comments on permission/copyright notices From noreply at buildbot.pypy.org Thu May 1 12:14:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 12:14:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some handwavy comparison table Message-ID: <20140501101426.DEE351D2BF8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5208:f1d159462e0a Date: 2014-05-01 12:14 +0200 http://bitbucket.org/pypy/extradoc/changeset/f1d159462e0a/ Log: add some handwavy comparison table diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -9,7 +9,7 @@ % authoryear To obtain author/year citation style instead of numeric. \usepackage[utf8]{inputenc} - +\usepackage{array} \usepackage{amsmath} @@ -338,6 +338,31 @@ %% - unlimited transaction length (easy atomic blocks) \section{The Way Forward} +\begin{table}[] + \centering + \begin{tabular}{|p{2cm}|c|c|c|c|c|} + \hline + & \textbf{GIL} & \parbox[t]{1cm}{\textbf{Fine-grained locking}} + & \parbox[t]{1cm}{\textbf{Shared-nothing}} & \textbf{HTM} & \textbf{STM}\\ + \hline + Performance & 0 & + & ++ & + & -{-} \\ + \hline + Existing applications & ++ & ++ & -{-} & ++ & ++ \\ + \hline + Better synchronization & 0 & 0 & - & - & ++ \\ + \hline + Implementation & ++ & - & ++ & ++ & ++ \\ + \hline + External libraries & ++ & ++ & ++ & ++ & ++ \\ + \hline + \end{tabular} + \caption{Comparison (--/-/0/+/++)} + \label{tab:comparison} +\end{table} + +Comparison in Table \ref{tab:comparison} + + possible solution:\\ - use virtual memory paging to somehow lower the STM overhead\\ - tight integration with GC and jit? 
From noreply at buildbot.pypy.org Thu May 1 13:41:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 13:41:45 +0200 (CEST) Subject: [pypy-commit] pypy default: More tweaks: pass a (w_func, w_type) pair to the elidable function Message-ID: <20140501114145.845EC1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71128:3f23bbb810db Date: 2014-05-01 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/3f23bbb810db/ Log: More tweaks: pass a (w_func, w_type) pair to the elidable function instead of a string, and make the function @elidable_promote as it should be what we want anyway. Use a new small container, W_DelayedBuiltinStr; see docstring. diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -59,7 +59,7 @@ self.tt, self.it, calls_repr)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsEntry.typedef = TypeDef( 'StatsEntry', @@ -86,7 +86,7 @@ frame_repr, self.callcount, self.reccallcount, self.tt, self.it)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsSubEntry.typedef = TypeDef( 'SubStatsEntry', @@ -215,18 +215,55 @@ return '<%s>' % w_func.name -def create_spec_for_object(space, w_obj): - class_name = space.type(w_obj).getname(space) +def create_spec_for_object(space, w_type): + class_name = w_type.getname(space) return "<'%s' object>" % (class_name,) -def create_spec(space, w_arg): +class W_DelayedBuiltinStr(W_Root): + # This class should not be seen at app-level, but is useful to + # contain a (w_func, w_type) pair returned by prepare_spec(). + # Turning this pair into a string cannot be done eagerly in + # an @elidable function because of space.str_w(), but it can + # be done lazily when we really want it. 
+ + _immutable_fields_ = ['w_func', 'w_type'] + + def __init__(self, w_func, w_type): + self.w_func = w_func + self.w_type = w_type + self.w_string = None + + def wrap_string(self, space): + if self.w_string is None: + if self.w_type is None: + s = create_spec_for_function(space, self.w_func) + elif self.w_func is None: + s = create_spec_for_object(space, self.w_type) + else: + s = create_spec_for_method(space, self.w_func, self.w_type) + self.w_string = space.wrap(s) + return self.w_string + +W_DelayedBuiltinStr.typedef = TypeDef( + 'DelayedBuiltinStr', + __str__ = interp2app(W_DelayedBuiltinStr.wrap_string), +) + +def returns_code(space, w_frame): + if isinstance(w_frame, W_DelayedBuiltinStr): + return w_frame.wrap_string(space) + return w_frame # actually a PyCode object + + +def prepare_spec(w_arg): if isinstance(w_arg, Method): - return create_spec_for_method(space, w_arg.w_function, w_arg.w_class) + return (w_arg.w_function, w_arg.w_class) elif isinstance(w_arg, Function): - return create_spec_for_function(space, w_arg) + return (w_arg, None) else: - return create_spec_for_object(space, w_arg) + return (None, space.type(w_arg)) +prepare_spec._always_inline_ = True def lsprof_call(space, w_self, frame, event, w_arg): @@ -239,12 +276,10 @@ w_self._enter_return(code) elif event == 'c_call': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_call(key) + w_self._enter_builtin_call(w_arg) elif event == 'c_return' or event == 'c_exception': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_return(key) + w_self._enter_builtin_return(w_arg) else: # ignore or raise an exception??? 
pass @@ -307,13 +342,14 @@ return entry raise - @jit.elidable - def _get_or_make_builtin_entry(self, key, make=True): + @jit.elidable_promote() + def _get_or_make_builtin_entry(self, w_func, w_type, make): + key = (w_func, w_type) try: return self.builtin_data[key] except KeyError: if make: - entry = ProfilerEntry(self.space.wrap(key)) + entry = ProfilerEntry(W_DelayedBuiltinStr(w_func, w_type)) self.builtin_data[key] = entry return entry raise @@ -337,20 +373,18 @@ context._stop(self, entry) self.current_context = context.previous - def _enter_builtin_call(self, key): - self = jit.promote(self) - key = jit.promote_string(key) - entry = self._get_or_make_builtin_entry(key) + def _enter_builtin_call(self, w_arg): + w_func, w_type = prepare_spec(w_arg) + entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) - def _enter_builtin_return(self, key): + def _enter_builtin_return(self, w_arg): context = self.current_context if context is None: return - self = jit.promote(self) - key = jit.promote_string(key) + w_func, w_type = prepare_spec(w_arg) try: - entry = self._get_or_make_builtin_entry(key, False) + entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: pass else: diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -27,6 +27,32 @@ for entry in stats: assert entry.code in expected + def test_builtins_callers(self): + import _lsprof + prof = _lsprof.Profiler(subcalls=True) + lst = [] + def f1(): + lst.append(len(lst)) + prof.enable(subcalls=True) + f1() + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + ) + by_id = set() + for entry in stats: + if entry.code == f1.func_code: + assert len(entry.calls) == 2 + for subentry in entry.calls: + assert subentry.code in expected + by_id.add(id(subentry.code)) + elif entry.code in 
expected: + by_id.add(id(entry.code)) + # :-( cProfile.py relies on the id() of the strings... + assert len(by_id) == len(expected) + def test_direct(self): import _lsprof def getticks(): From noreply at buildbot.pypy.org Thu May 1 13:41:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 13:41:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fixes Message-ID: <20140501114146.B48561C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71129:75f2886305e0 Date: 2014-05-01 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/75f2886305e0/ Log: Translation fixes diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -208,6 +208,7 @@ def create_spec_for_function(space, w_func): + assert isinstance(w_func, Function) if w_func.w_module is not None: module = space.str_w(w_func.w_module) if module != '__builtin__': @@ -256,7 +257,7 @@ return w_frame # actually a PyCode object -def prepare_spec(w_arg): +def prepare_spec(space, w_arg): if isinstance(w_arg, Method): return (w_arg.w_function, w_arg.w_class) elif isinstance(w_arg, Function): @@ -374,7 +375,7 @@ self.current_context = context.previous def _enter_builtin_call(self, w_arg): - w_func, w_type = prepare_spec(w_arg) + w_func, w_type = prepare_spec(self.space, w_arg) entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) @@ -382,7 +383,7 @@ context = self.current_context if context is None: return - w_func, w_type = prepare_spec(w_arg) + w_func, w_type = prepare_spec(self.space, w_arg) try: entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: From noreply at buildbot.pypy.org Thu May 1 13:41:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 13:41:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Reintroduce definite crashes when 
using @elidable in a way that cannot be satisfied. Message-ID: <20140501114147.D380E1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71130:350eca4d9a9d Date: 2014-05-01 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/350eca4d9a9d/ Log: Reintroduce definite crashes when using @elidable in a way that cannot be satisfied. diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -266,14 +266,14 @@ # check that the result is really as expected if loopinvariant: if extraeffect != EffectInfo.EF_LOOPINVARIANT: - from rpython.jit.codewriter.policy import log; log.WARNING( + raise Exception( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" " effects): EF=%s" % (op, extraeffect)) if elidable: if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, EffectInfo.EF_ELIDABLE_CAN_RAISE): - from rpython.jit.codewriter.policy import log; log.WARNING( + raise Exception( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. 
it can have random" " effects): EF=%s" % (op, extraeffect)) From noreply at buildbot.pypy.org Thu May 1 14:09:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 14:09:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a pypy-c test for issue #1328 Message-ID: <20140501120941.581921C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71131:c7196a03a054 Date: 2014-05-01 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/c7196a03a054/ Log: Add a pypy-c test for issue #1328 diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -0,0 +1,31 @@ +import py +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestCProfile(BaseTestPyPyC): + + def test_cprofile_builtin(self): + def main(n): + import _lsprof + prof = _lsprof.Profiler() + i = 0 + lst = [] + prof.enable() + while i < n: + lst.append(i) # ID: append + lst.pop() # ID: pop + i += 1 + prof.disable() + return [(entry.code, entry.callcount) for entry in prof.getstats()] + # + log = self.run(main, [500]) + assert sorted(log.result) == [ + ("{method 'append' of 'list' objects}", 500), + ("{method 'disable' of '_lsprof.Profiler' objects}", 1), + ("{method 'pop' of 'list' objects}", 500), + ] + for method in ['append', 'pop']: + loop, = log.loops_by_id(method) + print loop.ops_by_id(method) + assert 'call(' not in repr(loop.ops_by_id(method)) + assert 'call_may_force(' not in repr(loop.ops_by_id(method)) + assert 'call_cond(' in repr(loop.ops_by_id(method)) From noreply at buildbot.pypy.org Thu May 1 14:19:54 2014 From: noreply at buildbot.pypy.org (taewookk) Date: Thu, 1 May 2014 14:19:54 +0200 (CEST) Subject: [pypy-commit] cffi default: issue #156: _Bool type already included in VS2013 Message-ID: <20140501121954.D41A81C01CB@cobra.cs.uni-duesseldorf.de> Author: Taewook Kang Branch: Changeset: 
r1509:d993a73f4d60 Date: 2014-05-01 20:27 +0900 http://bitbucket.org/cffi/cffi/changeset/d993a73f4d60/ Log: issue #156: _Bool type already included in VS2013 diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -222,7 +222,9 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; +#if !defined(_MSC_VER) || _MSC_VER <= 1700 typedef unsigned char _Bool; +#endif /************************************************************/ diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -780,7 +780,9 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; +#if !defined(_MSC_VER) || _MSC_VER <= 1700 typedef unsigned char _Bool; +#endif #else #if (defined (__SVR4) && defined (__sun)) || defined(_AIX) # include From noreply at buildbot.pypy.org Thu May 1 14:21:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 14:21:00 +0200 (CEST) Subject: [pypy-commit] pypy default: fix the test Message-ID: <20140501122100.2E52E1D2783@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71132:caee1b15d940 Date: 2014-05-01 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/caee1b15d940/ Log: fix the test diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -19,13 +19,13 @@ # log = self.run(main, [500]) assert sorted(log.result) == [ - ("{method 'append' of 'list' objects}", 500), - ("{method 'disable' of '_lsprof.Profiler' objects}", 1), - ("{method 'pop' of 'list' objects}", 500), + ("", 500), + ("", 1), + ("", 500), ] for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - assert 'call(' not in repr(loop.ops_by_id(method)) - assert 'call_may_force(' not 
in repr(loop.ops_by_id(method)) - assert 'call_cond(' in repr(loop.ops_by_id(method)) + assert ' call(' not in repr(loop.ops_by_id(method)) + assert ' call_may_force(' not in repr(loop.ops_by_id(method)) + assert ' cond_call(' in repr(loop.ops_by_id(method)) From noreply at buildbot.pypy.org Thu May 1 14:25:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 14:25:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more text; corrections Message-ID: <20140501122552.5C22C1D2783@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5209:98a3ff04f14b Date: 2014-05-01 14:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/98a3ff04f14b/ Log: more text; corrections diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -69,14 +69,14 @@ have a problem. While there is certainly a lot of popularity around languages like Python and Ruby, their ability to make use of multiple cores is somewhat limited. For ease of implementation they chose to -use a single, global interpreter lock (GIL) to synchronize the +use a single, global interpreter lock (GIL) to synchronise the execution of code in multiple threads. While this is a -straight-forward way to eliminate synchronization issues in the +straight-forward way to eliminate synchronisation issues in the interpreter, it prevents parallel execution. Code executed in multiple -threads will be serialized over this GIL so that only one thread can +threads will be serialised over this GIL so that only one thread can execute at a time. -There exist several solutions and work-arounds to remove or avoid the +There exist several solutions and workarounds to remove or avoid the GIL in order to benefit from multiple cores. We are going to discuss several of them and try to find the best way forward. The first approach uses fine-grained locking to replace the single GIL. 
Then @@ -96,7 +96,7 @@ These requirements are not easy to meet. We argue that STM is the overall winner. While it has a big performance problem currently, it gets more points in all the other categories. We think that it is the -only solution that also provides a better synchronization mechanism to +only solution that also provides a better synchronisation mechanism to the application in the form of atomic blocks. %% \subsection{Issue} @@ -124,13 +124,13 @@ \section{Discussion} \paragraph{dynamic language VM problems} - +XXX: - high allocation rate (short lived objects)\\ - (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) \subsection{Why is there a GIL?} -The GIL is a very simple synchronization mechanism for supporting +The GIL is a very simple synchronisation mechanism for supporting multi-threading in the interpreter. The basic guarantee is that the GIL may only be released in-between bytecode instructions. The interpreter can thus rely on complete isolation and atomicity of these @@ -151,7 +151,7 @@ thread-safe can voluntarily release the GIL themselves in order to still provide some parallelism. This is done for example for potentially long I/O operations. Consequently, I/O-bound, -multi-threaded applications can actually parallelize to some +multi-threaded applications can actually parallelise to some degree. Again, a potential solution should be able to integrate with external libraries with similar ease. We will however focus our argumentation more on running code in the interpreted language in @@ -159,21 +159,21 @@ Since the GIL is mostly an implementation detail of the interpreter, it is not exposed to the application running on top of it. To -synchronize memory accesses in applications using threads, the +synchronise memory accesses in applications using threads, the state-of-the-art still means explicit locking everywhere. It is well -known that using locks for synchronization is not easy. 
They are +known that using locks for synchronisation is not easy. They are non-composable, have overhead, may deadlock, limit scalability, and overall add a lot of complexity. For a better parallel programming model for dynamic languages, we propose another, well-known -synchronization mechanism called \emph{atomic blocks}. +synchronisation mechanism called \emph{atomic blocks}. Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a -series of instructions. Interpreters using using a GIL can simply -guarantee that the GIL is not released during the execution of the -atomic block. Of course, this still means that no two atomic blocks -can execute in parallel or even concurrently. Potential solutions -that provide a good way to implement atomic blocks are therefore +series of instructions. Interpreters using a GIL can simply guarantee +that the GIL is not released during the execution of the atomic +block. Of course, this still means that no two atomic blocks can +execute in parallel or even concurrently. Potential solutions that +provide a good way to implement atomic blocks are therefore preferable. @@ -188,9 +188,9 @@ \item[Performance:] How well does the approach perform compared to the GIL on single and multiple threads? \item[Existing applications:] How big are the changes required to - integrate with and parallelize existing applications? -\item[Better synchronization:] Does the approach enable better - synchronization mechanisms for applications (e.g. atomic blocks)? + integrate with and parallelise existing applications? +\item[Better synchronisation:] Does the approach enable better + synchronisation mechanisms for applications (e.g. atomic blocks)? \item[Implementation:] How difficult is it to implement the approach in the interpreter? 
\item[External libraries:] Does the approach allow for easy @@ -210,7 +210,7 @@ Jython\footnote{www.jython.org} is one project that implements an interpreter for Python on the JVM\footnote{Java Virtual Machine} and -that uses fine-grained locking to correctly synchronize the +that uses fine-grained locking to correctly synchronise the interpreter. For a language like Python, one needs quite a few, carefully placed locks. Since there is no central location, the complexity of the implementation is quite a bit greater compared to @@ -219,8 +219,8 @@ to avoid this issue. In the end, fine-grained locking can transparently replace the GIL -and therefore parallelize existing applications without any -changes. It does however not provide a better synchronization +and therefore parallelise existing applications without any +changes. It does however not provide a better synchronisation mechanism to the application like e.g. atomic blocks. %% - support of atomic blocks?\\ @@ -248,13 +248,13 @@ long as the application does not need to communicate a lot, because inter-process communication is relatively expensive. Also the implementation of this approach is very cheap since one can -actually take an unmodfied GIL-supported interpreter and run +actually take an unmodified GIL-supported interpreter and run multiple of them in parallel. That way, we also inherit the easy integration of external libraries without any changes. While the model of explicit communication is often seen as a -superior way to synchronize concurrent applications because +superior way to synchronise concurrent applications because of its explicitness, it does not actually introduce a better -synchronization mechanism for applications. +synchronisation mechanism for applications. %% - often needs major restructuring of programs (explicit data exchange)\\ %% - sometimes communication overhead is too large\\ @@ -292,16 +292,16 @@ to have a fallback in place in case this limit is reached. 
In recent attempts, the usual fallback is the GIL (XXX: cite). The current generation of HTM hits this limit very often for our use case (XXX: -cite ruby GIL paper) and therefore does not parallelize that well. +cite ruby GIL paper) and therefore does not parallelise that well. The performance of HTM is pretty good (XXX: cite again...) as it does -not introduce much overhead. And it can transparently parallelize +not introduce much overhead. And it can transparently parallelise existing applications to some degree. The implementation is very straight-forward because it directly replaces the GIL in a central place. HTM is also directly compatible with any external library that -needs to be integrated and synchronized for use in multiple +needs to be integrated and synchronised for use in multiple threads. The one thing that is missing is support for a better -synchronization mechanism for the application. It is not possible +synchronisation mechanism for the application. It is not possible in general to expose the hardware-transactions to the application in the form of atomic blocks because that would require much longer transactions. @@ -326,9 +326,9 @@ limitations. Transactions can be arbitrarily long. This makes it possible to actually expose transactions to the application in the form of atomic blocks. This is the only approach that enables a better -synchronization mechanism than locks for applications \emph{and} still -parallelizes when using it. We think this is a very important point -because it not only gives dynamic languages the ability to parallelize +synchronisation mechanism than locks for applications \emph{and} still +parallelises when using it. We think this is a very important point +because it not only gives dynamic languages the ability to parallelise (already commonplace in most other languages), but also pushes parallel programming forward. Together with sequential consistency it provides a lot of simplification for parallel applications. 
@@ -337,35 +337,59 @@ %% (FastLane: low overhead, not much gain)\\ %% - unlimited transaction length (easy atomic blocks) + \section{The Way Forward} -\begin{table}[] + +\begin{table*}[!ht] \centering - \begin{tabular}{|p{2cm}|c|c|c|c|c|} + \begin{tabular}{|l|c|c|c|c|c|} \hline - & \textbf{GIL} & \parbox[t]{1cm}{\textbf{Fine-grained locking}} - & \parbox[t]{1cm}{\textbf{Shared-nothing}} & \textbf{HTM} & \textbf{STM}\\ + & \textbf{GIL} & \textbf{Fine-grained locking} + & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ \hline - Performance & 0 & + & ++ & + & -{-} \\ + Performance (single-threaded) & ++ & + & ++ & ++ & -{-} \\ \hline - Existing applications & ++ & ++ & -{-} & ++ & ++ \\ + Performance (multi-threaded) & -{-} & + & + & + & + \\ \hline - Better synchronization & 0 & 0 & - & - & ++ \\ + Existing applications & ++ & ++ & -{-} & ++ & ++ \\ \hline - Implementation & ++ & - & ++ & ++ & ++ \\ + Better synchronisation & - & - & - & - & ++ \\ \hline - External libraries & ++ & ++ & ++ & ++ & ++ \\ + Implementation & ++ & - & ++ & ++ & ++ \\ + \hline + External libra\-ries & ++ & ++ & ++ & ++ & ++ \\ \hline \end{tabular} - \caption{Comparison (--/-/0/+/++)} + \caption{Comparison between the approaches (-{-}/-/o/+/++)} \label{tab:comparison} -\end{table} +\end{table*} -Comparison in Table \ref{tab:comparison} +Following the above argumentation for each approach we assembled a +general overview in Table \ref{tab:comparison}. The general picture is +everything else than clear. It looks like HTM may be a good solution +to replace the GIL in the future. Current implementations are however +far too limiting and do not provide good scaling. -possible solution:\\ -- use virtual memory paging to somehow lower the STM overhead\\ -- tight integration with GC and jit? +Just allowing for parallel execution only means that dynamic languages +catch up to all other languages that already provide real +parallelism. 
This is why we think that only the STM approach is a +viable solution in the long-term. It provides the application with a +simple memory model (sequential consistency) and a composable way to +synchronise memory accesses using atomic blocks. + +STM has a big performance problem. We believe that further work +to reduce the overhead by closely working together with the +hardware should be the focus of research. Hybrid approaches that +combine STM and HTM for performance may be able to overcome this +obstacle. + + + + +%% possible solution:\\ +%% - use virtual memory paging to somehow lower the STM overhead\\ +%% - tight integration with GC and jit? %% \appendix @@ -375,7 +399,7 @@ \acks -Acknowledgments, if needed. +Acknowledgments... % We recommend abbrvnat bibliography style. From noreply at buildbot.pypy.org Thu May 1 15:21:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 1 May 2014 15:21:02 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: say what we are working on Message-ID: <20140501132102.5D8F51C01CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5210:d66a59f8ecf2 Date: 2014-05-01 15:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/d66a59f8ecf2/ Log: say what we are working on diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -146,7 +146,7 @@ by open-source communities] The GIL also allows for easy integration with external C libraries that -do not need to be thread-safe. For the duration of the calls, we +may not be thread-safe. For the duration of the calls, we simply do not release the GIL. External libraries that are explicitly thread-safe can voluntarily release the GIL themselves in order to still provide some parallelism. This is done for example for @@ -269,11 +269,11 @@ these instructions, this approach runs the protected instructions inside transactions. 
-TM can be implemented in software (STM) or in hardware (HTM. There are -also some hybrid approaches that combine the two. We count these +TM can be implemented in software (STM) or in hardware (HTM). There +are also hybrid approaches, which combine the two. We count these hybrid approaches as STM, since they usually provide the same capabilities as software-only approaches but with different -performance characteristics. We will now first look at HTM that +performance characteristics. We will now first look at HTM, which recently gained a lot of popularity by its introduction in common desktop CPUs from Intel (Haswell generation). @@ -368,21 +368,27 @@ Following the above argumentation for each approach we assembled a general overview in Table \ref{tab:comparison}. The general picture is everything else than clear. It looks like HTM may be a good solution -to replace the GIL in the future. Current implementations are however -far too limiting and do not provide good scaling. +to replace the GIL in the near future. Current implementations are +however far too limiting and do not provide good scaling. -Just allowing for parallel execution only means that dynamic languages +Allowing for parallel execution just means that dynamic languages catch up to all other languages that already provide real parallelism. This is why we think that only the STM approach is a viable solution in the long-term. It provides the application with a simple memory model (sequential consistency) and a composable way to synchronise memory accesses using atomic blocks. -STM has a big performance problem. We believe that further work -to reduce the overhead by closely working together with the -hardware should be the focus of research. Hybrid approaches that -combine STM and HTM for performance may be able to overcome this -obstacle. +Unfortunately, STM has a big performance problem. One way to approach +this problem is to make STM systems that use the available hardware +better. 
We are currently working on a STM system that makes use of +several hardware features like virtual memory and memory segmentation. +We further tailor the system to the discussed use case which gives us +an advantage over other STM systems that are more general. With this +approach, initial results suggest that we can keep the overhead of STM +already below 50\%. A hybrid TM system, which also uses HTM to +accelerate certain tasks, looks like a very promising direction of +research too. In general we believe that further work to reduce the +overhead of STM is very worthwhile. From noreply at buildbot.pypy.org Thu May 1 16:34:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 16:34:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Change the Makefile for asmgcc: instead of a lot of separate rules, Message-ID: <20140501143442.A071C1C3441@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71133:d0edc0e21b53 Date: 2014-05-01 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/d0edc0e21b53/ Log: Change the Makefile for asmgcc: instead of a lot of separate rules, have one rule that contains all the steps needed to turn a %.c into a %.o (and not a %.lbl.s, which should reduce a lot the time taken by the final linking step). 
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -421,24 +421,12 @@ #XXX: this conditional part is not tested at all if self.config.translation.gcrootfinder == 'asmgcc': - trackgcfiles = [cfile[:cfile.rfind('.')] for cfile in mk.cfiles] if self.translator.platform.name == 'msvc': - trackgcfiles = [f for f in trackgcfiles - if f.startswith(('implement', 'testing', - '../module_cache/module'))] - sfiles = ['%s.s' % (c,) for c in trackgcfiles] - lblsfiles = ['%s.lbl.s' % (c,) for c in trackgcfiles] - gcmapfiles = ['%s.gcmap' % (c,) for c in trackgcfiles] - mk.definition('ASMFILES', sfiles) - mk.definition('ASMLBLFILES', lblsfiles) - mk.definition('GCMAPFILES', gcmapfiles) - if self.translator.platform.name == 'msvc': - mk.definition('DEBUGFLAGS', '-MD -Zi') + raise Exception("msvc no longer supports asmgcc") + if self.config.translation.shared: + mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') else: - if self.config.translation.shared: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') - else: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g') + mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g') if self.config.translation.shared: mk.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") @@ -447,46 +435,28 @@ mk.definition('PYTHON', get_recent_cpython_executable()) - if self.translator.platform.name == 'msvc': - lblofiles = [] - for cfile in mk.cfiles: - f = cfile[:cfile.rfind('.')] - if f in trackgcfiles: - ofile = '%s.lbl.obj' % (f,) - else: - ofile = '%s.obj' % (f,) + mk.definition('GCMAPFILES', '$(subst .c,.gcmap,$(SOURCES))') + mk.definition('OBJECTS1', '$(subst .c,.o,$(SOURCES))') + mk.definition('OBJECTS', '$(OBJECTS1) gcmaptable.s') - lblofiles.append(ofile) - mk.definition('ASMLBLOBJFILES', lblofiles) - mk.definition('OBJECTS', 'gcmaptable.obj $(ASMLBLOBJFILES)') - # /Oi (enable intrinsics) and /Ob1 (some inlining) 
are mandatory - # even in debug builds - mk.definition('ASM_CFLAGS', '$(CFLAGS) $(CFLAGSEXTRA) /Oi /Ob1') - mk.rule('.SUFFIXES', '.s', []) - mk.rule('.s.obj', '', - 'cmd /c $(MASM) /nologo /Cx /Cp /Zm /coff /Fo$@ /c $< $(INCLUDEDIRS)') - mk.rule('.c.gcmap', '', - ['$(CC) /nologo $(ASM_CFLAGS) /c /FAs /Fa$*.s $< $(INCLUDEDIRS)', - 'cmd /c $(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc -t $*.s > $@'] - ) - mk.rule('gcmaptable.c', '$(GCMAPFILES)', - 'cmd /c $(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc $(GCMAPFILES) > $@') + # the rule that transforms %.c into %.o, by compiling it to + # %.s, then applying trackgcroot to get %.lbl.s and %.gcmap, and + # finally by using the assembler ($(CC) again for now) to get %.o + mk.rule('%.o %.gcmap', '%.c', [ + '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< ' + '-o $*.s -S $< $(INCLUDEDIRS)', + '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' + '-t $*.s > $*.gctmp', + '$(CC) -o $*.o -c $*.lbl.s', + 'mv $*.gctmp $*.gcmap', + 'rm $*.s $*.lbl.s']) - else: - mk.definition('OBJECTS', '$(ASMLBLFILES) gcmaptable.s') - mk.rule('%.s', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< -o $@ -S $< $(INCLUDEDIRS)') - mk.rule('%.s', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< -o $@ -S $< $(INCLUDEDIRS)') - mk.rule('%.lbl.s %.gcmap', '%.s', - [ - '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' - '-t $< > $*.gctmp', - 'mv $*.gctmp $*.gcmap']) - mk.rule('gcmaptable.s', '$(GCMAPFILES)', - [ - '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' - '$(GCMAPFILES) > $@.tmp', - 'mv $@.tmp $@']) - mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed") + # the rule to compute gcmaptable.s + mk.rule('gcmaptable.s', '$(GCMAPFILES)', + [ + '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' + '$(GCMAPFILES) > $@.tmp', + 'mv $@.tmp $@']) else: if self.translator.platform.name == 'msvc': From noreply at buildbot.pypy.org Thu May 1 16:34:43 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 16:34:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix, maybe? Hard to reproduce Message-ID: <20140501143443.C9BE11C3441@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71134:fc40700d203d Date: 2014-05-01 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/fc40700d203d/ Log: Fix, maybe? Hard to reproduce diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1123,6 +1123,8 @@ # fd1 = os.open(filename1, os.O_WRONLY | os.O_CREAT, 0666) fd2 = os.open(filename2, os.O_WRONLY | os.O_CREAT, 0666) + # try to ensure we get twice the exact same output below + gc.collect(); gc.collect(); gc.collect() rgc.dump_rpy_heap(fd1) rgc.dump_rpy_heap(fd2) # try twice in a row keepalive_until_here(s2) From noreply at buildbot.pypy.org Thu May 1 16:44:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 16:44:39 +0200 (CEST) Subject: [pypy-commit] pypy default: some gcc 4.9 support Message-ID: <20140501144439.AEA3F1C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71135:dea725674dbe Date: 2014-05-01 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/dea725674dbe/ Log: some gcc 4.9 support diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,6 +522,8 @@ 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations 'bextr', + # invalid instruction + 'ud2', ]) # a partial list is hopefully good enough for now; it's all to support From noreply at buildbot.pypy.org Thu May 1 17:02:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 17:02:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Redo 3f8b9a32c444, this time more conservatively: try to constant-fold Message-ID: 
<20140501150231.1AD7E1C3441@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71136:c8e9da1478dc Date: 2014-05-01 17:01 +0200 http://bitbucket.org/pypy/pypy/changeset/c8e9da1478dc/ Log: Redo 3f8b9a32c444, this time more conservatively: try to constant- fold the CALL_PURE operation from *both* rewrite.py and pure.py. diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,6 +347,21 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) + def _can_optimize_call_pure(self, op): + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.get_constant_box(arg) + if const is None: + return None + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + return self.optimizer.call_pure_results[arg_consts] + except KeyError: + return None + class Optimizer(Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,6 +57,16 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): + # Step 1: check if all arguments are constant + result = self._can_optimize_call_pure(op) + if result is not None: + # this removes a CALL_PURE with all constant arguments. + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return + + # Step 2: check if all arguments are the same as a previous + # CALL_PURE. 
args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -516,24 +516,13 @@ return False def optimize_CALL_PURE(self, op): - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - break - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - result = self.optimizer.call_pure_results[arg_consts] - except KeyError: - pass - else: - # this removes a CALL_PURE with all constant arguments. - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return + # this removes a CALL_PURE with all constant arguments. + # Note that it's also done in pure.py. For now we need both... 
+ result = self._can_optimize_call_pure(op) + if result is not None: + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return self.emit_operation(op) def optimize_GUARD_NO_EXCEPTION(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,7 +5164,6 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): - py.test.skip("implement me") ops = """ [] p0 = newstr(3) From noreply at buildbot.pypy.org Thu May 1 17:24:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 17:24:07 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140501152407.4BB0D1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71137:643e652b0b55 Date: 2014-05-01 11:22 -0400 http://bitbucket.org/pypy/pypy/changeset/643e652b0b55/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,4 @@ .. this is a revision shortly after release-2.3.x .. startrev: 0524dae88c75 - +.. branch: reflex-support From noreply at buildbot.pypy.org Thu May 1 17:29:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 17:29:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Actually, ud2 is unreachable, not ignored. Message-ID: <20140501152902.1C8ED1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71138:95d8506f3861 Date: 2014-05-01 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/95d8506f3861/ Log: Actually, ud2 is unreachable, not ignored. 
diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,8 +522,6 @@ 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations 'bextr', - # invalid instruction - 'ud2', ]) # a partial list is hopefully good enough for now; it's all to support @@ -695,6 +693,9 @@ return self.visit_ret(line) return [] + def visit_ud2(self, line): + return InsnStop("ud2") # unreachable instruction + def visit_jmp(self, line): tablelabels = [] match = self.r_jmp_switch.match(line) From noreply at buildbot.pypy.org Thu May 1 18:39:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 18:39:38 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: clean up test Message-ID: <20140501163938.50F011C00B9@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71139:91e137303927 Date: 2014-05-01 19:39 +0300 http://bitbucket.org/pypy/pypy/changeset/91e137303927/ Log: clean up test diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -67,27 +67,21 @@ domain = 'google.com' result = [0] * nthreads threads = [None] * nthreads - print 'starting', 70 def lookup_name(i): name, aliases, address_list = gethostbyname_ex(domain) if name == domain: result[i] += 1 - print 'done',i,75 for i in range(nthreads): threads[i] = threading.Thread(target = lookup_name, args=[i]) threads[i].start() - print 'threads', 78 - print 'done', 79 for i in range(nthreads): threads[i].join() assert sum(result) == nthreads - print 'done', 82 def test_thread_safe_gethostbyaddr(): import threading nthreads = 10 ip = '8.8.8.8' - print 'starting', 87 domain = gethostbyaddr(ip)[0] result = [0] * nthreads threads = [None] * nthreads @@ -98,7 +92,6 @@ for i in range(nthreads): threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) 
threads[i].start() - print 'threads', 98 for i in range(nthreads): threads[i].join() assert sum(result) == nthreads From noreply at buildbot.pypy.org Thu May 1 19:53:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 19:53:49 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140501175349.91D0B1C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71140:cf6744cc5576 Date: 2014-04-29 17:03 -0700 http://bitbucket.org/pypy/pypy/changeset/cf6744cc5576/ Log: py3k compat diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,6 +1,7 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible @@ -11,7 +12,7 @@ def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) @@ -309,7 +310,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] From noreply at buildbot.pypy.org Thu May 1 19:53:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 19:53:50 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20140501175350.B5F461C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71141:5a6d4233a5f6 Date: 2014-04-29 17:10 -0700 http://bitbucket.org/pypy/pypy/changeset/5a6d4233a5f6/ Log: py3k compat diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- 
a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -45,9 +45,9 @@ assert len(t) == 4 def test_buffer(self): - s = 'a'.__add__('b') - assert buffer(s) == buffer('ab') - assert memoryview(s) == 'ab' + s = b'a'.__add__(b'b') + assert buffer(s) == buffer(b'ab') + assert memoryview(s) == b'ab' def test_add_strbuf(self): # make three strbuf objects From noreply at buildbot.pypy.org Thu May 1 19:53:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 19:53:51 +0200 (CEST) Subject: [pypy-commit] pypy default: use get_module_type_name for %T so we more closely match cpython, at least for Message-ID: <20140501175351.CE6D01C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71142:1f2e9de489e6 Date: 2014-05-01 10:50 -0700 http://bitbucket.org/pypy/pypy/changeset/1f2e9de489e6/ Log: use get_module_type_name for %T so we more closely match cpython, at least for builtin types diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py 
+++ b/pypy/interpreter/error.py @@ -362,9 +362,9 @@ value = getattr(self, attr) if fmt == 'R': result = space.str_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': result = value.getname(space) else: result = str(value) @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): From noreply at buildbot.pypy.org Thu May 1 19:53:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 19:53:52 +0200 (CEST) Subject: [pypy-commit] pypy default: backout c94a4ee2aa7d: %T calls get_module_type_name now Message-ID: <20140501175352.E8DCF1C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71143:45957dfd5ee6 Date: 2014-05-01 10:50 -0700 http://bitbucket.org/pypy/pypy/changeset/45957dfd5ee6/ Log: backout c94a4ee2aa7d: %T calls get_module_type_name now diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1415,10 +1415,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): From noreply at buildbot.pypy.org Thu May 1 19:53:54 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 19:53:54 +0200 (CEST) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140501175354.110721C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: 
Changeset: r71144:4cc9b702d120 Date: 2014-05-01 10:52 -0700 http://bitbucket.org/pypy/pypy/changeset/4cc9b702d120/ Log: merge upstream diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,6 +1,7 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible @@ -11,7 +12,7 @@ def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) @@ -302,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] diff --git 
a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -45,9 +45,9 @@ assert len(t) == 4 def test_buffer(self): - s = 'a'.__add__('b') - assert buffer(s) == buffer('ab') - assert memoryview(s) == 'ab' + s = b'a'.__add__(b'b') + assert buffer(s) == buffer(b'ab') + assert memoryview(s) == b'ab' def test_add_strbuf(self): # make three strbuf objects From noreply at buildbot.pypy.org Thu May 1 20:13:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 20:13:07 +0200 (CEST) Subject: [pypy-commit] pypy default: backout c8e9da1478dc, broke translation Message-ID: <20140501181307.10A831C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71145:7b0b1b52601d Date: 2014-05-01 14:11 -0400 http://bitbucket.org/pypy/pypy/changeset/7b0b1b52601d/ Log: backout c8e9da1478dc, broke translation diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,21 +347,6 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) - def _can_optimize_call_pure(self, op): - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - return None - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - return self.optimizer.call_pure_results[arg_consts] - except KeyError: - return None - class Optimizer(Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,16 +57,6 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): - # Step 1: check if all 
arguments are constant - result = self._can_optimize_call_pure(op) - if result is not None: - # this removes a CALL_PURE with all constant arguments. - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return - - # Step 2: check if all arguments are the same as a previous - # CALL_PURE. args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -516,13 +516,24 @@ return False def optimize_CALL_PURE(self, op): - # this removes a CALL_PURE with all constant arguments. - # Note that it's also done in pure.py. For now we need both... - result = self._can_optimize_call_pure(op) - if result is not None: - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.get_constant_box(arg) + if const is None: + break + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + result = self.optimizer.call_pure_results[arg_consts] + except KeyError: + pass + else: + # this removes a CALL_PURE with all constant arguments. 
+ self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return self.emit_operation(op) def optimize_GUARD_NO_EXCEPTION(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,6 +5164,7 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): + py.test.skip("implement me") ops = """ [] p0 = newstr(3) From noreply at buildbot.pypy.org Thu May 1 20:13:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 20:13:08 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140501181308.5F7A71C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71146:45da6ce7a49e Date: 2014-05-01 14:11 -0400 http://bitbucket.org/pypy/pypy/changeset/45da6ce7a49e/ Log: merge heads diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1415,10 +1415,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -362,9 +362,9 @@ value = getattr(self, attr) if fmt == 'R': result = space.str_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': 
result = value.getname(space) else: result = str(value) @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,6 +1,7 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible @@ -11,7 +12,7 @@ def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) @@ -302,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if 
type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] diff --git a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -45,9 +45,9 @@ assert len(t) == 4 def test_buffer(self): - s = 'a'.__add__('b') - assert buffer(s) == buffer('ab') - assert memoryview(s) == 'ab' + s = b'a'.__add__(b'b') + assert buffer(s) == buffer(b'ab') + assert memoryview(s) == b'ab' def test_add_strbuf(self): # make three strbuf objects diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,8 +522,6 @@ 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations 'bextr', - # invalid instruction - 'ud2', ]) # a partial list is hopefully good enough for now; it's all to support @@ -695,6 +693,9 @@ return self.visit_ret(line) return [] + def visit_ud2(self, line): + return InsnStop("ud2") # unreachable instruction + def visit_jmp(self, line): tablelabels = [] match = self.r_jmp_switch.match(line) From noreply at buildbot.pypy.org Thu May 1 21:30:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 1 May 2014 21:30:06 +0200 (CEST) Subject: [pypy-commit] pypy default: adjust per 1f2e9de489e6 Message-ID: <20140501193006.C610B1C01CB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71147:ce13767ee917 Date: 2014-05-01 12:29 -0700 http://bitbucket.org/pypy/pypy/changeset/ce13767ee917/ Log: adjust per 1f2e9de489e6 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -218,19 +218,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert exc.value.args[0] 
== "'Num' object has no attribute 'n'" + assert "Num' object has no attribute 'n'" in exc.value.args[0] x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert exc.value.args[0] == "'Num' object has no attribute 'lineno'" + assert "Num' object has no attribute 'lineno'" in exc.value.args[0] y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert exc.value.args[0] == "'Num' object has no attribute 'foobar'" + assert "Num' object has no attribute 'foobar'" in exc.value.args[0] x = ast.Num(lineno=2) assert x.lineno == 2 @@ -423,4 +423,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - \ No newline at end of file From noreply at buildbot.pypy.org Thu May 1 21:39:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 21:39:07 +0200 (CEST) Subject: [pypy-commit] pypy default: skipped test showing difference Message-ID: <20140501193907.798161C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71148:c8d61f60c005 Date: 2014-05-01 15:38 -0400 http://bitbucket.org/pypy/pypy/changeset/c8d61f60c005/ Log: skipped test showing difference diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -19,6 +19,11 @@ ast = self.ast assert isinstance(ast.__version__, str) + def test_flags(self): + skip("broken") + from copy_reg import _HEAPTYPE + assert self.ast.Module.__flags__ & _HEAPTYPE + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") From noreply at buildbot.pypy.org Thu May 1 22:22:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 22:22:26 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: try a different approach Message-ID: <20140501202226.4012A1C05CE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: 
r71149:87914f035951 Date: 2014-05-01 22:22 +0300 http://bitbucket.org/pypy/pypy/changeset/87914f035951/ Log: try a different approach diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -20,7 +20,6 @@ from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof -from rpython.rlib import rthread INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): @@ -30,8 +29,6 @@ constants = _c.constants locals().update(constants) # Define constants from _c -ll_locks = {} - if _c.WIN32: from rpython.rlib import rwin32 def rsocket_startup(): @@ -41,12 +38,9 @@ assert res == 0 finally: lltype.free(wsadata, flavor='raw') - ll_locks['gethostbyname'] = rthread.allocate_lock() - ll_locks['gethostbyaddr'] = rthread.allocate_lock() else: def rsocket_startup(): - ll_locks['gethostbyname'] = rthread.allocate_lock() - ll_locks['gethostbyaddr'] = rthread.allocate_lock() + pass def ntohs(x): @@ -1130,18 +1124,18 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name): +def gethostbyname_ex(name, lock): # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - with ll_locks['gethostbyname']: + with lock: hostent = _c.gethostbyname(name) return gethost_common(name, hostent, addr) -def gethostbyaddr(ip): +def gethostbyaddr(ip, lock): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - with ll_locks['gethostbyaddr']: + with lock: p, size = addr.lock_in_addr() try: hostent = _c.gethostbyaddr(p, size, addr.family) diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,6 +3,14 @@ from rpython.rlib.rsocket import * import socket as cpy_socket 
+class DummyLock(object): + def __enter__(self): + pass + + def __exit__(self, *args): + pass + + def setup_module(mod): rsocket_startup() @@ -47,7 +55,7 @@ def test_gethostbyname_ex(): for host in ["localhost", "127.0.0.1"]: - name, aliases, address_list = gethostbyname_ex(host) + name, aliases, address_list = gethostbyname_ex(host, DummyLock()) allnames = [name] + aliases for n in allnames: assert isinstance(n, str) @@ -67,8 +75,9 @@ domain = 'google.com' result = [0] * nthreads threads = [None] * nthreads + lock = threading.Lock() def lookup_name(i): - name, aliases, address_list = gethostbyname_ex(domain) + name, aliases, address_list = gethostbyname_ex(domain, lock) if name == domain: result[i] += 1 for i in range(nthreads): @@ -82,11 +91,12 @@ import threading nthreads = 10 ip = '8.8.8.8' - domain = gethostbyaddr(ip)[0] + lock = threading.Lock() + domain = gethostbyaddr(ip, lock)[0] result = [0] * nthreads threads = [None] * nthreads def lookup_addr(ip, i): - name, aliases, address_list = gethostbyaddr(ip) + name, aliases, address_list = gethostbyaddr(ip, lock) if name == domain: result[i] += 1 for i in range(nthreads): @@ -110,7 +120,7 @@ with py.test.raises(ipv6): gethostbyaddr(host) continue - name, aliases, address_list = gethostbyaddr(host) + name, aliases, address_list = gethostbyaddr(host, DummyLock()) allnames = [name] + aliases for n in allnames: assert isinstance(n, str) From noreply at buildbot.pypy.org Thu May 1 22:22:27 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 22:22:27 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: add lock to pypy module, which will properly handle no-threading translation Message-ID: <20140501202227.80E031C05CE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71150:ea9e10befc8f Date: 2014-05-01 23:01 +0300 http://bitbucket.org/pypy/pypy/changeset/ea9e10befc8f/ Log: add lock to pypy module, which will properly handle no-threading translation diff --git 
a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,6 +17,7 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() + space.socket_gethostbyxxx_lock = space.allocate_lock() def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -43,7 +43,7 @@ for a host. The host argument is a string giving a host name or IP number. """ try: - res = rsocket.gethostbyname_ex(host) + res = rsocket.gethostbyname_ex(host, space.socket_gethostbyxxx_lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -56,7 +56,7 @@ for a host. The host argument is a string giving a host name or IP number. """ try: - res = rsocket.gethostbyaddr(host) + res = rsocket.gethostbyaddr(host, space.socket_gethostbyxxx_lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) From noreply at buildbot.pypy.org Thu May 1 22:22:28 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 22:22:28 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: fix ztranslation Message-ID: <20140501202228.9EE3A1C05CE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71151:78f3be62c4fb Date: 2014-05-01 23:01 +0300 http://bitbucket.org/pypy/pypy/changeset/78f3be62c4fb/ Log: fix ztranslation diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -10,6 +10,7 @@ mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) # force computation and record what we wrap module = mod.Module(space, W_Root()) + module.startup(space) for name in module.loaders: 
seeobj_w.append(module._load_lazily(space, name)) if hasattr(module, 'submodules'): From noreply at buildbot.pypy.org Thu May 1 23:02:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 23:02:19 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: add State to module Message-ID: <20140501210219.D328D1D2BFA@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71152:841653d07ec4 Date: 2014-05-02 00:01 +0300 http://bitbucket.org/pypy/pypy/changeset/841653d07ec4/ Log: add State to module diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,7 +17,8 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() - space.socket_gethostbyxxx_lock = space.allocate_lock() + from pypy.module._socket.interp_func import State + space.fromcache(State).alloc_lock(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -43,7 +43,8 @@ for a host. The host argument is a string giving a host name or IP number. """ try: - res = rsocket.gethostbyname_ex(host, space.socket_gethostbyxxx_lock) + lock = space.fromcache(State).gethostbyxxx_lock + res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -56,7 +57,8 @@ for a host. The host argument is a string giving a host name or IP number. 
""" try: - res = rsocket.gethostbyaddr(host, space.socket_gethostbyxxx_lock) + lock = space.fromcache(State).gethostbyxxx_lock + res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -310,3 +312,11 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) + +class State(object): + def __init__(self, space): + self.gethostbyxxx_lock = None + + def alloc_lock(self, space): + self.gethostbyxxx_lock = space.allocate_lock() + From noreply at buildbot.pypy.org Thu May 1 23:09:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 1 May 2014 23:09:58 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: we don't expect socketerror from this Message-ID: <20140501210958.C05151C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71153:8e3ca2c46a2f Date: 2014-05-01 17:05 -0400 http://bitbucket.org/pypy/pypy/changeset/8e3ca2c46a2f/ Log: we don't expect socketerror from this diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -42,8 +42,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ + lock = space.fromcache(State).gethostbyxxx_lock try: - lock = space.fromcache(State).gethostbyxxx_lock res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) @@ -56,8 +56,8 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
""" + lock = space.fromcache(State).gethostbyxxx_lock try: - lock = space.fromcache(State).gethostbyxxx_lock res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) From noreply at buildbot.pypy.org Thu May 1 23:14:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 May 2014 23:14:15 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: make API backward-compatible Message-ID: <20140501211415.4587C1C00B9@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: issue1430 Changeset: r71154:2e3fc0780b66 Date: 2014-05-02 00:14 +0300 http://bitbucket.org/pypy/pypy/changeset/2e3fc0780b66/ Log: make API backward-compatible diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -1124,14 +1124,22 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name, lock): +class DummyLock(object): + def __enter__(self): + pass + + def __exit__(self, *args): + pass + + +def gethostbyname_ex(name, lock=DummyLock()): # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) with lock: hostent = _c.gethostbyname(name) return gethost_common(name, hostent, addr) -def gethostbyaddr(ip, lock): +def gethostbyaddr(ip, lock=DummyLock()): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,13 +3,6 @@ from rpython.rlib.rsocket import * import socket as cpy_socket -class DummyLock(object): - def __enter__(self): - pass - - def __exit__(self, *args): - pass - def setup_module(mod): rsocket_startup() @@ -55,7 +48,7 @@ def test_gethostbyname_ex(): for host in ["localhost", "127.0.0.1"]: - name, aliases, address_list = gethostbyname_ex(host, DummyLock()) + name, aliases, 
address_list = gethostbyname_ex(host) allnames = [name] + aliases for n in allnames: assert isinstance(n, str) @@ -120,7 +113,7 @@ with py.test.raises(ipv6): gethostbyaddr(host) continue - name, aliases, address_list = gethostbyaddr(host, DummyLock()) + name, aliases, address_list = gethostbyaddr(host) allnames = [name] + aliases for n in allnames: assert isinstance(n, str) From noreply at buildbot.pypy.org Thu May 1 23:57:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 May 2014 23:57:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to constant-fold the CALL_PURE operation from *both* rewrite.py and Message-ID: <20140501215745.D21571D2BE1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71155:e974d3f77576 Date: 2014-05-01 23:56 +0200 http://bitbucket.org/pypy/pypy/changeset/e974d3f77576/ Log: Try to constant-fold the CALL_PURE operation from *both* rewrite.py and pure.py. (yet another attempt from c8e9da1478dc) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,6 +347,21 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) + def _can_optimize_call_pure(self, op): + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.optimizer.get_constant_box(arg) + if const is None: + return None + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + return self.optimizer.call_pure_results[arg_consts] + except KeyError: + return None + class Optimizer(Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,6 +57,16 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): + # Step 1: check if 
all arguments are constant + result = self._can_optimize_call_pure(op) + if result is not None: + # this removes a CALL_PURE with all constant arguments. + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return + + # Step 2: check if all arguments are the same as a previous + # CALL_PURE. args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -516,24 +516,13 @@ return False def optimize_CALL_PURE(self, op): - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - break - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - result = self.optimizer.call_pure_results[arg_consts] - except KeyError: - pass - else: - # this removes a CALL_PURE with all constant arguments. - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return + # this removes a CALL_PURE with all constant arguments. + # Note that it's also done in pure.py. For now we need both... 
+ result = self._can_optimize_call_pure(op) + if result is not None: + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return self.emit_operation(op) def optimize_GUARD_NO_EXCEPTION(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,7 +5164,6 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): - py.test.skip("implement me") ops = """ [] p0 = newstr(3) From noreply at buildbot.pypy.org Fri May 2 00:06:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 00:06:39 +0200 (CEST) Subject: [pypy-commit] pypy default: put DummyLock in rthread Message-ID: <20140501220639.91D0E1D2BE1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71156:c494afbfef83 Date: 2014-05-01 17:27 -0400 http://bitbucket.org/pypy/pypy/changeset/c494afbfef83/ Log: put DummyLock in rthread diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -692,23 +692,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. 
# In this special situation we should just not lock at all # (translation is not multithreaded anyway). if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1722,24 +1716,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,4 +1,3 @@ - from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir @@ -113,6 +112,24 @@ assert len(y) == 0 return rffi.cast(lltype.Signed, ll_start_new_thread(x)) +class DummyLock(object): + def acquire(self, flag): + return True + + def release(self): + pass + + def _freeze_(self): + return True + + def __enter__(self): + pass + + def __exit__(self, *args): + pass + +dummy_lock = DummyLock() + class Lock(object): """ Container for low-level implementation of a lock object From noreply at buildbot.pypy.org Fri May 2 00:06:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 00:06:40 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140501220640.EAC891D2BE1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71157:0022e98eb8d3 Date: 2014-05-01 18:06 -0400 http://bitbucket.org/pypy/pypy/changeset/0022e98eb8d3/ Log: merge heads diff --git 
a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -692,23 +692,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. # In this special situation we should just not lock at all # (translation is not multithreaded anyway). if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1722,24 +1716,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). 
diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,4 +1,3 @@ - from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir @@ -113,6 +112,24 @@ assert len(y) == 0 return rffi.cast(lltype.Signed, ll_start_new_thread(x)) +class DummyLock(object): + def acquire(self, flag): + return True + + def release(self): + pass + + def _freeze_(self): + return True + + def __enter__(self): + pass + + def __exit__(self, *args): + pass + +dummy_lock = DummyLock() + class Lock(object): """ Container for low-level implementation of a lock object From noreply at buildbot.pypy.org Fri May 2 00:07:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 00:07:01 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: merge default Message-ID: <20140501220701.CFC251D2BE1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71158:3b27e0985e34 Date: 2014-05-01 17:53 -0400 http://bitbucket.org/pypy/pypy/changeset/3b27e0985e34/ Log: merge default diff too long, truncating to 2000 out of 2614 lines diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys, new + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." 
parser = OptionParser(usage=usage) @@ -184,7 +184,7 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - mainmod = new.module('__main__') + mainmod = types.ModuleType('__main__') mainmod.__file__ = progname mainmod.__package__ = None runctx(code, mainmod.__dict__, None, options.outfile, options.sort) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,4 @@ .. this is a revision shortly after release-2.3.x .. startrev: 0524dae88c75 - +.. branch: reflex-support diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -692,23 +692,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. # In this special situation we should just not lock at all # (translation is not multithreaded anyway). 
if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1415,10 +1409,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): @@ -1722,24 +1716,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -362,9 +362,9 @@ value = getattr(self, attr) if fmt == 'R': result = space.str_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': result = value.getname(space) else: result = str(value) @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -19,6 +19,11 @@ ast = self.ast assert isinstance(ast.__version__, str) + def test_flags(self): + skip("broken") + from copy_reg import _HEAPTYPE + assert self.ast.Module.__flags__ & _HEAPTYPE + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") @@ -218,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert exc.value.args[0] == "'Num' object has no attribute 'n'" + assert "Num' object has no attribute 'n'" in exc.value.args[0] x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert exc.value.args[0] == "'Num' object has no attribute 'lineno'" + assert "Num' object has no attribute 'lineno'" in exc.value.args[0] y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert exc.value.args[0] == "'Num' object has no attribute 'foobar'" + assert "Num' object has no attribute 'foobar'" in exc.value.args[0] x = ast.Num(lineno=2) assert x.lineno == 2 @@ -423,4 +428,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - \ 
No newline at end of file diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -59,7 +59,7 @@ self.tt, self.it, calls_repr)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsEntry.typedef = TypeDef( 'StatsEntry', @@ -86,7 +86,7 @@ frame_repr, self.callcount, self.reccallcount, self.tt, self.it)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsSubEntry.typedef = TypeDef( 'SubStatsEntry', @@ -189,50 +189,82 @@ subentry._stop(tt, it) - at jit.elidable_promote() def create_spec_for_method(space, w_function, w_type): - w_function = w_function + class_name = None if isinstance(w_function, Function): name = w_function.name + # try to get the real class that defines the method, + # which is a superclass of the class of the instance + from pypy.objspace.std.typeobject import W_TypeObject # xxx + if isinstance(w_type, W_TypeObject): + w_realclass, _ = space.lookup_in_type_where(w_type, name) + if isinstance(w_realclass, W_TypeObject): + class_name = w_realclass.get_module_type_name() else: name = '?' 
- # try to get the real class that defines the method, - # which is a superclass of the class of the instance - from pypy.objspace.std.typeobject import W_TypeObject # xxx - class_name = w_type.getname(space) # if the rest doesn't work - if isinstance(w_type, W_TypeObject) and name != '?': - w_realclass, _ = space.lookup_in_type_where(w_type, name) - if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() - return "{method '%s' of '%s' objects}" % (name, class_name) + if class_name is None: + class_name = w_type.getname(space) # if the rest doesn't work + return "" % (name, class_name) - at jit.elidable_promote() def create_spec_for_function(space, w_func): - if w_func.w_module is None: - module = '' + assert isinstance(w_func, Function) + if w_func.w_module is not None: + module = space.str_w(w_func.w_module) + if module != '__builtin__': + return '<%s.%s>' % (module, w_func.name) + return '<%s>' % w_func.name + + +def create_spec_for_object(space, w_type): + class_name = w_type.getname(space) + return "<'%s' object>" % (class_name,) + + +class W_DelayedBuiltinStr(W_Root): + # This class should not be seen at app-level, but is useful to + # contain a (w_func, w_type) pair returned by prepare_spec(). + # Turning this pair into a string cannot be done eagerly in + # an @elidable function because of space.str_w(), but it can + # be done lazily when we really want it. 
+ + _immutable_fields_ = ['w_func', 'w_type'] + + def __init__(self, w_func, w_type): + self.w_func = w_func + self.w_type = w_type + self.w_string = None + + def wrap_string(self, space): + if self.w_string is None: + if self.w_type is None: + s = create_spec_for_function(space, self.w_func) + elif self.w_func is None: + s = create_spec_for_object(space, self.w_type) + else: + s = create_spec_for_method(space, self.w_func, self.w_type) + self.w_string = space.wrap(s) + return self.w_string + +W_DelayedBuiltinStr.typedef = TypeDef( + 'DelayedBuiltinStr', + __str__ = interp2app(W_DelayedBuiltinStr.wrap_string), +) + +def returns_code(space, w_frame): + if isinstance(w_frame, W_DelayedBuiltinStr): + return w_frame.wrap_string(space) + return w_frame # actually a PyCode object + + +def prepare_spec(space, w_arg): + if isinstance(w_arg, Method): + return (w_arg.w_function, w_arg.w_class) + elif isinstance(w_arg, Function): + return (w_arg, None) else: - module = space.str_w(w_func.w_module) - if module == '__builtin__': - module = '' - else: - module += '.' 
- return '{%s%s}' % (module, w_func.name) - - - at jit.elidable_promote() -def create_spec_for_object(space, w_obj): - class_name = space.type(w_obj).getname(space) - return "{'%s' object}" % (class_name,) - - -def create_spec(space, w_arg): - if isinstance(w_arg, Method): - return create_spec_for_method(space, w_arg.w_function, w_arg.w_class) - elif isinstance(w_arg, Function): - return create_spec_for_function(space, w_arg) - else: - return create_spec_for_object(space, w_arg) + return (None, space.type(w_arg)) +prepare_spec._always_inline_ = True def lsprof_call(space, w_self, frame, event, w_arg): @@ -245,12 +277,10 @@ w_self._enter_return(code) elif event == 'c_call': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_call(key) + w_self._enter_builtin_call(w_arg) elif event == 'c_return' or event == 'c_exception': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_return(key) + w_self._enter_builtin_return(w_arg) else: # ignore or raise an exception??? 
pass @@ -313,13 +343,14 @@ return entry raise - @jit.elidable - def _get_or_make_builtin_entry(self, key, make=True): + @jit.elidable_promote() + def _get_or_make_builtin_entry(self, w_func, w_type, make): + key = (w_func, w_type) try: return self.builtin_data[key] except KeyError: if make: - entry = ProfilerEntry(self.space.wrap(key)) + entry = ProfilerEntry(W_DelayedBuiltinStr(w_func, w_type)) self.builtin_data[key] = entry return entry raise @@ -343,18 +374,18 @@ context._stop(self, entry) self.current_context = context.previous - def _enter_builtin_call(self, key): - self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key) + def _enter_builtin_call(self, w_arg): + w_func, w_type = prepare_spec(self.space, w_arg) + entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) - def _enter_builtin_return(self, key): + def _enter_builtin_return(self, w_arg): context = self.current_context if context is None: return - self = jit.promote(self) + w_func, w_type = prepare_spec(self.space, w_arg) try: - entry = self._get_or_make_builtin_entry(key, False) + entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: pass else: diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -11,6 +11,48 @@ import _lsprof assert repr(_lsprof.Profiler) == "" + def test_builtins(self): + import _lsprof + prof = _lsprof.Profiler() + lst = [] + prof.enable() + lst.append(len(lst)) + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + "", + ) + for entry in stats: + assert entry.code in expected + + def test_builtins_callers(self): + import _lsprof + prof = _lsprof.Profiler(subcalls=True) + lst = [] + def f1(): + lst.append(len(lst)) + prof.enable(subcalls=True) + f1() + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + ) 
+ by_id = set() + for entry in stats: + if entry.code == f1.func_code: + assert len(entry.calls) == 2 + for subentry in entry.calls: + assert subentry.code in expected + by_id.add(id(subentry.code)) + elif entry.code in expected: + by_id.add(id(entry.code)) + # :-( cProfile.py relies on the id() of the strings... + assert len(by_id) == len(expected) + def test_direct(self): import _lsprof def getticks(): @@ -37,10 +79,8 @@ stats = prof.getstats() entries = {} for entry in stats: - if not hasattr(entry.code, 'co_name'): - print entry.code - else: - entries[entry.code.co_name] = entry + assert hasattr(entry.code, 'co_name') + entries[entry.code.co_name] = entry efoo = entries['foo'] assert efoo.callcount == 2 assert efoo.reccallcount == 1 diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,7 +600,8 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = 
TypeDef("_socket.socket", +W_RSocket.typedef = TypeDef("socket", + __module__ = "_socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -313,6 +313,11 @@ cls.space = space cls.w_udir = space.wrap(str(udir)) + def test_module(self): + import _socket + assert _socket.socket.__name__ == 'socket' + assert _socket.socket.__module__ == '_socket' + def test_ntoa_exception(self): import _socket raises(_socket.error, _socket.inet_ntoa, "ab") diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -16,7 +16,7 @@ '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', - 'CPPInstance' : 'interp_cppyy.W_CPPInstance', + 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', } @@ -25,7 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', - 'Template' : 'pythonify.CppyyTemplateType', + 'Template' : 'pythonify.CPPTemplate', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -127,19 +127,18 @@ argc = len(args_w) try: - # Note: argcount is +1 for the class (== w_self) - if argc < 5 or 6 < argc: + if argc < 4 or 5 < argc: raise TypeError("wrong number of arguments") - # second argument must be a name - funcname = space.str_w(args_w[1]) + # first argument must be a name + funcname = space.str_w(args_w[0]) # last (optional) argument is number of 
parameters npar = 0 - if argc == 6: npar = space.int_w(args_w[5]) + if argc == 5: npar = space.int_w(args_w[4]) - # third argument must be a callable python object - w_callable = args_w[2] + # second argument must be a callable python object + w_callable = args_w[1] if not space.is_true(space.callable(w_callable)): raise TypeError("2nd argument is not a valid python callable") @@ -159,17 +158,21 @@ # so far, so good; leaves on issue: CINT is expecting a wrapper, but # we need the overload that takes a function pointer, which is not in # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, - space.float_w(args_w[3]), space.float_w(args_w[4]), npar) - - from pypy.module.cppyy import interp_cppyy - w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, - do_cast=False, python_owns=True, fresh=True) + newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, + space.float_w(args_w[2]), space.float_w(args_w[3]), npar) + + # w_self is a null-ptr bound as TF1 + from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator + cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) + cppself._rawobject = newinst + memory_regulator.register(cppself) # tie all the life times to the TF1 instance - space.setattr(w_instance, space.wrap('_callback'), w_callback) + space.setattr(w_self, space.wrap('_callback'), w_callback) - return w_instance + # by definition for __init__ + return None + except (OperationError, TypeError, IndexError), e: newargs_w = args_w[1:] # drop class @@ -312,7 +315,7 @@ # location w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.buffer_w(w_address) + buf = space.getarg_w('s*', w_address) from pypy.module._rawffi import buffer assert isinstance(buf, buffer.RawFFIBuffer) address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) @@ -395,7 +398,7 @@ _method_alias(space, w_pycppclass, "__len__", "GetSize") elif name == "TF1": - space.setattr(w_pycppclass, 
space.wrap("__new__"), _pythonizations["tf1_tf1"]) + space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -155,18 +155,16 @@ the memory_regulator.""" _attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required', - 'args_expected', 'converters', 'executor', '_funcaddr', 'cif_descr', - 'uses_local'] + 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True - def __init__(self, space, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, declaring_scope, method_index, arg_defs, args_required): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.index = method_index self.cppmethod = capi.c_get_method(self.space, self.scope, method_index) self.arg_defs = arg_defs self.args_required = args_required - self.args_expected = len(arg_defs) # Setup of the method dispatch's innards is done lazily, i.e. only when # the method is actually used. 
@@ -176,6 +174,12 @@ self._funcaddr = lltype.nullptr(rffi.VOIDP.TO) self.uses_local = False + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + return cppinstance.get_cppthis(declaring_scope) + def _address_from_local_buffer(self, call_local, idx): if not call_local: return call_local @@ -277,7 +281,7 @@ funcaddr = methgetter(rffi.cast(capi.C_OBJECT, cppthis)) self._funcaddr = rffi.cast(rffi.VOIDP, funcaddr) - nargs = self.args_expected + 1 # +1: cppthis + nargs = len(self.arg_defs) + 1 # +1: cppthis # memory block for CIF description (note: not tracked as the life # time of methods is normally the duration of the application) @@ -335,7 +339,7 @@ # extra cif_descr.abi = clibffi.FFI_DEFAULT_ABI - cif_descr.nargs = self.args_expected + 1 # +1: cppthis + cif_descr.nargs = len(self.arg_defs) + 1 # +1: cppthis res = jit_libffi.jit_ffi_prep_cif(cif_descr) if res != clibffi.FFI_OK: @@ -405,28 +409,29 @@ class CPPFunction(CPPMethod): - """Global (namespaced) function dispatcher. For now, the base class has - all the needed functionality, by allowing the C++ this pointer to be null - in the call. An optimization is expected there, however.""" + """Global (namespaced) function dispatcher.""" _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return capi.C_NULL_OBJECT + def __repr__(self): return "CPPFunction: %s" % self.signature() class CPPTemplatedCall(CPPMethod): - """Method dispatcher that first needs to resolve the template instance. 
- Note that the derivation is from object: the CPPMethod is a data member.""" + """Method dispatcher that first resolves the template instance.""" - _attrs_ = ['space', 'templ_args', 'method'] + _attrs_ = ['space', 'templ_args'] _immutable_ = True - def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required): self.space = space self.templ_args = templ_args # TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here - CPPMethod.__init__(self, space, containing_scope, method_index, arg_defs, args_required) + CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required) def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -456,24 +461,15 @@ _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return rffi.cast(capi.C_OBJECT, declaring_scope.handle) + def call(self, cppthis, args_w): - # TODO: these casts are very, very un-pretty; need to find a way of - # re-using CPPMethod's features w/o these roundabouts - vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - cppinstance = None - try: - cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) - use_args_w = args_w[1:] - except (OperationError, TypeError), e: - use_args_w = args_w - w_result = CPPMethod.call(self, vscope, use_args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - if cppinstance: - cppinstance._rawobject = newthis - memory_regulator.register(cppinstance) - return args_w[0] - return wrap_cppobject(self.space, newthis, self.scope, - do_cast=False, python_owns=True, fresh=True) + # Note: this does not return a wrapped instance, just a pointer to the + # new instance; the overload must still wrap it before returning. Also, + # cppthis is declaring_scope.handle (as per unpack_cppthis(), above). 
+ return CPPMethod.call(self, cppthis, args_w) def __repr__(self): return "CPPConstructor: %s" % self.signature() @@ -505,9 +501,10 @@ _attrs_ = ['space', 'scope', 'functions'] _immutable_fields_ = ['scope', 'functions[*]'] - def __init__(self, space, containing_scope, functions): + def __init__(self, space, declaring_scope, functions): self.space = space - self.scope = containing_scope + self.scope = declaring_scope + assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -520,12 +517,10 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - if cppinstance is not None: - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - else: - cppthis = capi.C_NULL_OBJECT + # instance handling is specific to the function type only, so take it out + # of the loop over function overloads + cppthis = self.functions[0].unpack_cppthis( + self.space, w_cppinstance, self.functions[0].scope) assert lltype.typeOf(cppthis) == capi.C_OBJECT # The following code tries out each of the functions in order. 
If @@ -585,6 +580,39 @@ ) +class W_CPPConstructorOverload(W_CPPOverload): + @jit.elidable_promote() + def is_static(self): + return self.space.w_False + + @jit.elidable_promote() + def unpack_cppthis(self, w_cppinstance): + return rffi.cast(capi.C_OBJECT, self.scope.handle) + + @jit.unroll_safe + @unwrap_spec(args_w='args_w') + def call(self, w_cppinstance, args_w): + w_result = W_CPPOverload.call(self, w_cppinstance, args_w) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if cppinstance is not None: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return w_cppinstance + return wrap_cppobject(self.space, newthis, self.functions[0].scope, + do_cast=False, python_owns=True, fresh=True) + + def __repr__(self): + return "W_CPPConstructorOverload(%s)" % [f.signature() for f in self.functions] + +W_CPPConstructorOverload.typedef = TypeDef( + 'CPPConstructorOverload', + is_static = interp2app(W_CPPConstructorOverload.is_static), + call = interp2app(W_CPPConstructorOverload.call), + signature = interp2app(W_CPPOverload.signature), +) + + class W_CPPBoundMethod(W_Root): _attrs_ = ['cppthis', 'method'] @@ -605,9 +633,9 @@ _attrs_ = ['space', 'scope', 'converter', 'offset'] _immutable_fields = ['scope', 'converter', 'offset'] - def __init__(self, space, containing_scope, type_name, offset): + def __init__(self, space, declaring_scope, type_name, offset): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.converter = converter.get_converter(self.space, type_name, '') self.offset = offset @@ -717,7 +745,10 @@ # create the overload methods from the method sets for pyname, methods in methods_temp.iteritems(): CPPMethodSort(methods).sort() - overload = W_CPPOverload(self.space, self, methods[:]) + if pyname == self.name: + overload = W_CPPConstructorOverload(self.space, self, methods[:]) + else: + overload = 
W_CPPOverload(self.space, self, methods[:]) self.methods[pyname] = overload def full_name(self): @@ -857,14 +888,13 @@ class W_CPPClass(W_CPPScope): - _attrs_ = ['space', 'default_constructor', 'name', 'handle', 'methods', 'datamembers'] - _immutable_fields_ = ['kind', 'default_constructor', 'methods[*]', 'datamembers[*]'] + _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] + _immutable_fields_ = ['kind', 'constructor', 'methods[*]', 'datamembers[*]'] kind = "class" def __init__(self, space, name, opaque_handle): W_CPPScope.__init__(self, space, name, opaque_handle) - self.default_constructor = None def _make_cppfunction(self, pyname, index): num_args = capi.c_method_num_args(self.space, self, index) @@ -876,8 +906,6 @@ arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.space, self, index): cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required) - if args_required == 0: - self.default_constructor = cppfunction elif capi.c_method_is_template(self.space, self, index): templ_args = capi.c_template_args(self.space, self, index) cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required) @@ -905,9 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - if self.default_constructor is not None: - return self.default_constructor.call(capi.C_NULL_OBJECT, []) - raise self.missing_attribute_error("default_constructor") + return self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) @@ -1046,6 +1072,16 @@ raise return None + def instance__init__(self, args_w): + try: + constructor_overload = self.cppclass.get_overload(self.cppclass.name) + constructor_overload.call(self, args_w) + except OperationError, e: + if not e.match(self.space, self.space.w_AttributeError): + raise + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name)) + def 
instance__eq__(self, w_other): # special case: if other is None, compare pointer-style if self.space.is_w(w_other, self.space.w_None): @@ -1128,6 +1164,7 @@ 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), _python_owns = GetSetProperty(W_CPPInstance.fget_python_owns, W_CPPInstance.fset_python_owns), + __init__ = interp2app(W_CPPInstance.instance__init__), __eq__ = interp2app(W_CPPInstance.instance__eq__), __ne__ = interp2app(W_CPPInstance.instance__ne__), __nonzero__ = interp2app(W_CPPInstance.instance__nonzero__), diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,30 +1,31 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible # with info from multiple dictionaries and do not need to bother with meta # classes for inheritance. Both are python classes, though, and refactoring # may be in order at some point. 
-class CppyyScopeMeta(type): +class CPPScope(type): def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) -class CppyyNamespaceMeta(CppyyScopeMeta): +class CPPNamespace(CPPScope): def __dir__(cls): return cls._cpp_proxy.__dir__() -class CppyyClassMeta(CppyyScopeMeta): +class CPPClass(CPPScope): pass -# class CppyyClass defined in _init_pythonify() +# class CPPInstance defined in _init_pythonify() -class CppyyTemplateType(object): +class CPPTemplate(object): def __init__(self, name, scope=None): self._name = name if scope is None: @@ -91,7 +92,7 @@ # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) - metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + metans = type(CPPNamespace)(namespace_name+'_meta', (CPPNamespace,), {}) if cppns: d = {"_cpp_proxy" : cppns} @@ -137,21 +138,14 @@ break return tuple(bases) -def make_new(class_name, cppclass): - try: - constructor_overload = cppclass.get_overload(cppclass.type_name) - except AttributeError: - msg = "cannot instantiate abstract class '%s'" % class_name - def __new__(cls, *args): - raise TypeError(msg) - else: - def __new__(cls, *args): - # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) - if not instance.__class__ is cls: - instance.__class__ = cls # happens for derived class - return instance +def make_new(class_name): + def __new__(cls, *args): + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, 
class_name, final_class_name, cppclass): @@ -159,7 +153,7 @@ # get a list of base classes for class creation bases = [get_pycppclass(base) for base in cppclass.get_base_names()] if not bases: - bases = [CppyyClass,] + bases = [CPPInstance,] else: # it's technically possible that the required class now has been built # if one of the base classes uses it in e.g. a function interface @@ -170,7 +164,7 @@ # create a meta class to allow properties (for static data write access) metabases = [type(base) for base in bases] - metacpp = type(CppyyClassMeta)(class_name+'_meta', _drop_cycles(metabases), {}) + metacpp = type(CPPClass)(class_name+'_meta', _drop_cycles(metabases), {}) # create the python-side C++ class representation def dispatch(self, name, signature): @@ -178,7 +172,7 @@ return types.MethodType(make_method(name, cppol), self, type(self)) d = {"_cpp_proxy" : cppclass, "__dispatch__" : dispatch, - "__new__" : make_new(class_name, cppclass), + "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) @@ -214,7 +208,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(template_name, scope) + return CPPTemplate(template_name, scope) def get_pycppitem(scope, name): @@ -309,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] @@ -426,15 +420,12 @@ # at pypy-c startup, rather than on the "import cppyy" statement import cppyy - # top-level classes - global CppyyClass - class CppyyClass(cppyy.CPPInstance): - __metaclass__ = CppyyClassMeta - - def __init__(self, *args, **kwds): - # self is only a placeholder; now create the actual C++ object - args = (self,) + args - self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) + # root of 
all proxy classes: CPPInstance in pythonify exists to combine the + # CPPClass meta class with the interp-level CPPInstanceBase + global CPPInstance + class CPPInstance(cppyy.CPPInstanceBase): + __metaclass__ = CPPClass + pass # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -4,17 +4,22 @@ #include #include #include +#include #include #include +#include #include #include +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + // add example01.cxx code int globalAddOneToInt(int a); namespace dummy { #include "example01.cxx" +#include "datatypes.cxx" } int globalAddOneToInt(int a) { @@ -27,168 +32,307 @@ typedef std::map Handles_t; static Handles_t s_handles; +enum EMethodType { kNormal=0, kConstructor=1, kStatic=2 }; + struct Cppyy_PseudoMethodInfo { Cppyy_PseudoMethodInfo(const std::string& name, const std::vector& argtypes, - const std::string& returntype) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} + const std::string& returntype, + EMethodType mtype = kNormal) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; std::string m_returntype; + EMethodType m_type; +}; + +struct Cppyy_PseudoDatambrInfo { + Cppyy_PseudoDatambrInfo(const std::string& name, + const std::string& type, + size_t offset, bool isstatic) : + m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} + + std::string m_name; + std::string m_type; + size_t m_offset; + bool m_isstatic; }; struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} - Cppyy_PseudoClassInfo(const std::vector& methods) : - m_methods(methods ) {} + Cppyy_PseudoClassInfo(const std::vector& methods, + long method_offset, + const std::vector& data) : + m_methods(methods), m_method_offset(method_offset), 
m_datambrs(data) {} std::vector m_methods; + long m_method_offset; + std::vector m_datambrs; }; typedef std::map Scopes_t; static Scopes_t s_scopes; -static int example01_last_static_method = 0; -static int example01_last_constructor = 0; -static int payload_methods_offset = 0; +static std::map s_methods; + +#define PUBLIC_CPPYY_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ + offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname, argtypes, #dmtype)); \ + s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + argtypes.push_back(#dmtype); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname, argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + argtypes.clear(); \ + argtypes.push_back("const "#dmtype"&"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname"_c", argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ + PUBLIC_CPPYY_DATA(dmname, dmtype); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array2", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ + PUBLIC_CPPYY_DATA2(dmname, dmtype); \ + argtypes.push_back(#dmtype"*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_array", argtypes, #dmtype"*")); \ 
+ s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + argtypes.clear(); argtypes.push_back("void*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_void_array_"#key, argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + +#define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ + (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static long s_scope_id = 0; + static long s_scope_id = 0; + static long s_method_id = 0; { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // ( 0) static double staticAddToDouble(double a) + // static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double", kStatic)); + s_methods["static_example01::staticAddToDouble_double"] = s_method_id++; - // ( 1) static int staticAddOneToInt(int a) - // ( 2) static int staticAddOneToInt(int a, int b) + // static int staticAddOneToInt(int a) + // static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int_int"] = s_method_id++; - // ( 3) static int staticAtoi(const char* str) + // static int staticAtoi(const 
char* str) argtypes.clear(); argtypes.push_back("const char*"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int", kStatic)); + s_methods["static_example01::staticAtoi_cchar*"] = s_method_id++; - // ( 4) static char* staticStrcpy(const char* strin) - methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // static char* staticStrcpy(const char* strin) + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*", kStatic)); + s_methods["static_example01::staticStrcpy_cchar*"] = s_method_id++; - // ( 5) static void staticSetPayload(payload* p, double d) - // ( 6) static payload* staticCyclePayload(payload* p, double d) - // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + // static void staticSetPayload(payload* p, double d) + // static payload* staticCyclePayload(payload* p, double d) + // static payload staticCopyCyclePayload(payload* p, double d) argtypes.clear(); argtypes.push_back("payload*"); argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void", kStatic)); + s_methods["static_example01::staticSetPayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*", kStatic)); + s_methods["static_example01::staticCyclePayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload", kStatic)); + s_methods["static_example01::staticCopyCyclePayload_payload*_double"] = s_method_id++; - // ( 8) static int getCount() - // ( 9) static void setCount(int) + // static int 
getCount() + // static void setCount(int) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int", kStatic)); + s_methods["static_example01::getCount"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void", kStatic)); + s_methods["static_example01::setCount_int"] = s_method_id++; - // cut-off is used in cppyy_is_static - example01_last_static_method = methods.size(); + // example01() + // example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01"] = s_method_id++; + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01_int"] = s_method_id++; - // (10) example01() - // (11) example01(int a) - argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - - // cut-off is used in cppyy_is_constructor - example01_last_constructor = methods.size(); - - // (12) int addDataToInt(int a) + // int addDataToInt(int a) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + s_methods["example01::addDataToInt_int"] = s_method_id++; - // (13) int addDataToIntConstRef(const int& a) + // int addDataToIntConstRef(const int& a) argtypes.clear(); argtypes.push_back("const int&"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + s_methods["example01::addDataToIntConstRef_cint&"] = s_method_id++; - // (14) int overloadedAddDataToInt(int a, int b) + // int overloadedAddDataToInt(int a, 
int b) argtypes.clear(); argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int"] = s_method_id++; - // (15) int overloadedAddDataToInt(int a) - // (16) int overloadedAddDataToInt(int a, int b, int c) + // int overloadedAddDataToInt(int a) + // int overloadedAddDataToInt(int a, int b, int c) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); - + s_methods["example01::overloadedAddDataToInt_int"] = s_method_id++; argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int_int"] = s_method_id++; - // (17) double addDataToDouble(double a) + // double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + s_methods["example01::addDataToDouble_double"] = s_method_id++; - // (18) int addDataToAtoi(const char* str) - // (19) char* addToStringValue(const char* str) + // int addDataToAtoi(const char* str) + // char* addToStringValue(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + s_methods["example01::addDataToAtoi_cchar*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + s_methods["example01::addToStringValue_cchar*"] = s_method_id++; - // (20) void setPayload(payload* p) - // (21) payload* cyclePayload(payload* p) - // (22) payload copyCyclePayload(payload* p) + // void setPayload(payload* p) + // payload* cyclePayload(payload* p) + // payload copyCyclePayload(payload* p) argtypes.clear(); argtypes.push_back("payload*"); 
methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + s_methods["example01::setPayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + s_methods["example01::cyclePayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; - payload_methods_offset = methods.size(); - - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class example01 + //==================================================================== + { // class payload -- s_handles["payload"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // (23) payload(double d = 0.) + // payload(double d = 0.) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor", kConstructor)); + s_methods["payload::payload_double"] = s_method_id++; - // (24) double getData() + // double getData() argtypes.clear(); methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + s_methods["payload::getData"] = s_method_id++; - // (25) void setData(double d) + // void setData(double d) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + s_methods["payload::setData_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class payload + + //==================================================================== + + { // class cppyy_test_data -- + s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + + std::vector 
methods; + + // cppyy_test_data() + std::vector argtypes; + methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); + s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + + methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); + s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + + std::vector data; + PUBLIC_CPPYY_DATA2(bool, bool); + PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (uchar, unsigned char); + PUBLIC_CPPYY_DATA3(short, short, h); + PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); + PUBLIC_CPPYY_DATA3(int, int, i); + PUBLIC_CPPYY_DATA3(uint, unsigned int, I); + PUBLIC_CPPYY_DATA3(long, long, l); + PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); + PUBLIC_CPPYY_DATA (llong, long long); + PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA3(float, float, f); + PUBLIC_CPPYY_DATA3(double, double, d); + PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (voidp, void*); + + PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); + PUBLIC_CPPYY_STATIC_DATA(short, short); + PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); + PUBLIC_CPPYY_STATIC_DATA(int, int); + PUBLIC_CPPYY_STATIC_DATA(uint, unsigned int); + PUBLIC_CPPYY_STATIC_DATA(long, long); + PUBLIC_CPPYY_STATIC_DATA(ulong, unsigned long); + PUBLIC_CPPYY_STATIC_DATA(llong, long long); + PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); + PUBLIC_CPPYY_STATIC_DATA(float, float); + PUBLIC_CPPYY_STATIC_DATA(double, double); + PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class cppyy_test_data + } } _init; @@ -230,155 +374,387 @@ /* method/function dispatching -------------------------------------------- */ void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, 
int nargs, void* args) { - switch ((long)method) { - case 5: // static void example01:;staticSetPayload(payload* p, double d) + long idx = (long)method; + if (idx == s_methods["static_example01::staticSetPayload_payload*_double"]) { assert(!self && nargs == 2); dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 9: // static void example01::setCount(int) + } else if (idx == s_methods["static_example01::setCount_int"]) { assert(!self && nargs == 1); dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); - break; - case 20: // void example01::setPayload(payload* p); + } else if (idx == s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + assert(self && nargs == 0); + ((dummy::cppyy_test_data*)self)->destroy_arrays(); + } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_char"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["cppyy_test_data::set_short"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + assert(self && nargs == 
1); + ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_int"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_uint"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_long"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + assert(self && nargs == 1); + 
((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_float"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == s_methods["cppyy_test_data::set_float_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_double"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + } else { assert(!"method unknown in cppyy_call_v"); - break; } } +unsigned char cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + unsigned char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_bool"]) { + assert(self && nargs == 0); + result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + } else { + assert(!"method unknown in cppyy_call_b"); + } + return result; +} + +char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_char"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_char(); + } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + assert(self && nargs 
== 0); + result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + } else { + assert(!"method unknown in cppyy_call_c"); + } + return result; +} + +short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + short result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_short"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_short(); + } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + assert(self && nargs == 0); + result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + } else { + assert(!"method unknown in cppyy_call_h"); + } + return result; +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; - switch ((long)method) { - case 1: // static int example01::staticAddOneToInt(int) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddOneToInt_int"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 2: // static int example01::staticAddOneToInt(int, int) + } else if (idx == s_methods["static_example01::staticAddOneToInt_int_int"]) { assert(!self && nargs == 2); result = dummy::example01::staticAddOneToInt( ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); - break; - case 3: // static int example01::staticAtoi(const char* str) + } else if (idx == s_methods["static_example01::staticAtoi_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 8: // static int example01::getCount() + } else if (idx == s_methods["static_example01::getCount"]) { assert(!self && nargs == 0); result = dummy::example01::getCount(); - break; - case 12: // int example01::addDataToInt(int a) + } else if (idx == s_methods["example01::addDataToInt_int"]) { assert(self && nargs == 1); result = 
((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 18: // int example01::addDataToAtoi(const char* str) + } else if (idx == s_methods["example01::addDataToAtoi_cchar*"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_int"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_int(); + } else { assert(!"method unknown in cppyy_call_i"); - break; } return result; } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { long result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = (long)dummy::example01::staticStrcpy( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + } else if (idx == s_methods["static_example01::staticCyclePayload_payload*_double"]) { assert(!self && nargs == 2); result = (long)dummy::example01::staticCyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 19: // char* example01::addToStringValue(const char* str) + } else if (idx == s_methods["example01::addToStringValue_cchar*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->addToStringValue( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 21: // payload* example01::cyclePayload(payload* p) + } else if (idx == s_methods["example01::cyclePayload_payload*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == 
s_methods["cppyy_test_data::get_uint"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint(); + } else if (idx == s_methods["cppyy_test_data::get_long"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_long(); + } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); + } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array(); + } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); + } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); + } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array(); + } else if (idx == 
s_methods["cppyy_test_data::get_uint_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); + } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); + } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); + } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_h"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + assert(self && nargs == 1); + result = 
(long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + 
(*(double**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + (*(double**)&((CPPYY_G__value*)args)[0])); + } else { assert(!"method unknown in cppyy_call_l"); - break; } return result; } +long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + long long result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_llong"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_llong(); + } else if (idx == s_methods["cppyy_test_data::get_ullong"]) { + assert(self && nargs == 0); + result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + } else { + assert(!"method unknown in cppyy_call_ll"); + } + return result; +} + +float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + float result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_float"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_float(); + } else { + assert(!"method unknown in cppyy_call_f"); + } + return result; +} + double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; - switch ((long)method) { - case 0: // static double example01::staticAddToDouble(double) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddToDouble_double"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 17: // double example01::addDataToDouble(double a) + } else if (idx == s_methods["example01::addDataToDouble_double"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToDouble(((CPPYY_G__value*)args)[0].obj.d); - break; - case 24: // double payload::getData() + } else if (idx == 
s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_double"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_double(); + } else { assert(!"method unknown in cppyy_call_d"); - break; } return result; } char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char* result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticStrcpy((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else { assert(!"method unknown in cppyy_call_s"); - break; } return result; } cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t handle, int nargs, void* args) { void* result = 0; - if (handle == s_handles["example01"]) { - switch ((long)method) { - case 10: - assert(nargs == 0); - result = new dummy::example01; - break; - case 11: - assert(nargs == 1); - result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); - break; - default: - assert(!"method of example01 unknown in cppyy_constructor"); - break; - } - } else if (handle == s_handles["payload"]) { - switch ((long)method) { - case 23: - if (nargs == 0) result = new dummy::payload; - else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); - break; - default: - assert(!"method payload unknown in cppyy_constructor"); - break; - } + const long idx = (long)method; + if (idx == s_methods["example01::example01"]) { + assert(nargs == 0); + result = new dummy::example01; + } else if (idx == s_methods["example01::example01_int"]) { + assert(nargs == 1); + result = new dummy::example01(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["payload::payload_double"]) 
{ + assert(nargs == 0 || nargs == 1); + if (nargs == 0) result = new dummy::payload; + else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::cppyy_test_data"]) { + assert(nargs == 0); + result = new dummy::cppyy_test_data; + } else { + assert(!"method unknown in cppyy_constructor"); } return (cppyy_object_t)result; } @@ -486,10 +862,10 @@ } cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return (cppyy_method_t)method_index; - else if (handle == s_handles["payload"]) - return (cppyy_method_t)((long)method_index + payload_methods_offset); + if (s_scopes.find(handle) != s_scopes.end()) { + long id = s_scopes[handle].m_method_offset + (long)method_index; + return (cppyy_method_t)id; + } assert(!"unknown class in cppyy_get_method"); return (cppyy_method_t)0; } @@ -497,26 +873,45 @@ /* method properties ----------------------------------------------------- */ int cppyy_is_constructor(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return example01_last_static_method <= method_index - && method_index < example01_last_constructor; - else if (handle == s_handles["payload"]) - return (long)method_index == 0; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kConstructor; + assert(!"unknown class in cppyy_is_constructor"); return 0; } int cppyy_is_staticmethod(cppyy_type_t handle, cppyy_index_t method_index) { - if (handle == s_handles["example01"]) - return method_index < example01_last_static_method ? 
1 : 0; - if (handle == s_handles["payload"]) - return 0; + if (s_scopes.find(handle) != s_scopes.end()) + return s_scopes[handle].m_methods[method_index].m_type == kStatic; + assert(!"unknown class in cppyy_is_staticmethod"); + return 0; +} + + +/* data member reflection information ------------------------------------- */ +int cppyy_num_datamembers(cppyy_scope_t handle) { + return s_scopes[handle].m_datambrs.size(); +} + +char* cppyy_datamember_name(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_name); +} + +char* cppyy_datamember_type(cppyy_scope_t handle, int idatambr) { + return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); +} + +size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { + return s_scopes[handle].m_datambrs[idatambr].m_offset; +} + + +/* data member properties ------------------------------------------------ */ +int cppyy_is_publicdata(cppyy_scope_t handle, int idatambr) { return 1; } - -/* data member reflection information ------------------------------------- */ -int cppyy_num_datamembers(cppyy_scope_t /* handle */) { - return 0; +int cppyy_is_staticdata(cppyy_scope_t handle, int idatambr) { + return s_scopes[handle].m_datambrs[idatambr].m_isstatic; } diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -7,14 +7,18 @@ if 'dummy' in lcapi.reflection_library: # run only tests that are covered by the dummy backend and tests # that do not rely on reflex - if not ('test_helper.py' in item.location[0] or \ - 'test_cppyy.py' in item.location[0] or \ - 'test_pythonify.py' in item.location[0]): + import os + tst = os.path.basename(item.location[0]) + if not tst in ('test_helper.py', 'test_cppyy.py', 'test_pythonify.py', + 'test_datatypes.py'): py.test.skip("genreflex is not installed") import re - if 'test_pythonify.py' in item.location[0] and 
\ + if tst == 'test_pythonify.py' and \ not re.search("AppTestPYTHONIFY.test0[1-6]", item.location[2]): py.test.skip("genreflex is not installed") + elif tst == 'test_datatypes.py' and \ + not re.search("AppTestDATATYPES.test0[1-8]", item.location[2]): + py.test.skip("genreflex is not installed") def pytest_ignore_collect(path, config): if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ b/pypy/module/cppyy/test/datatypes.h @@ -16,9 +16,9 @@ class four_vector { public: four_vector(double x, double y, double z, double t) : - m_x(x), m_y(y), m_z(z), m_t(t), m_cc_called(false) {} + m_cc_called(false), m_x(x), m_y(y), m_z(z), m_t(t) {} four_vector(const four_vector& s) : - m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t), m_cc_called(true) {} + m_cc_called(true), m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t) {} From noreply at buildbot.pypy.org Fri May 2 00:07:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 00:07:05 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: cleanup Message-ID: <20140501220705.542981D2BE1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71159:527d7902252d Date: 2014-05-01 18:03 -0400 http://bitbucket.org/pypy/pypy/changeset/527d7902252d/ Log: cleanup diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -18,7 +18,7 @@ from rpython.rlib.rsocket import rsocket_startup rsocket_startup() from pypy.module._socket.interp_func import State - space.fromcache(State).alloc_lock(space) + space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ 
b/pypy/module/_socket/interp_func.py @@ -42,7 +42,7 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).gethostbyxxx_lock + lock = space.fromcache(State).netdb_lock try: res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: @@ -56,7 +56,7 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ - lock = space.fromcache(State).gethostbyxxx_lock + lock = space.fromcache(State).netdb_lock try: res = rsocket.gethostbyaddr(host, lock) except SocketError, e: @@ -315,8 +315,7 @@ class State(object): def __init__(self, space): - self.gethostbyxxx_lock = None + self.netdb_lock = None - def alloc_lock(self, space): - self.gethostbyxxx_lock = space.allocate_lock() - + def startup(self, space): + self.netdb_lock = space.allocate_lock() diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -84,10 +84,10 @@ import threading nthreads = 10 ip = '8.8.8.8' - lock = threading.Lock() - domain = gethostbyaddr(ip, lock)[0] + domain = gethostbyaddr(ip)[0] result = [0] * nthreads threads = [None] * nthreads + lock = threading.Lock() def lookup_addr(ip, i): name, aliases, address_list = gethostbyaddr(ip, lock) if name == domain: From noreply at buildbot.pypy.org Fri May 2 00:07:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 00:07:06 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: use dummy_lock from rthread Message-ID: <20140501220706.99F201D2BE1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71160:9ff62e96a9ed Date: 2014-05-01 17:51 -0400 http://bitbucket.org/pypy/pypy/changeset/9ff62e96a9ed/ Log: use dummy_lock from rthread diff --git a/rpython/rlib/rsocket.py 
b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,6 +18,7 @@ from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rthread import dummy_lock from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET @@ -1124,22 +1125,14 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -class DummyLock(object): - def __enter__(self): - pass - - def __exit__(self, *args): - pass - - -def gethostbyname_ex(name, lock=DummyLock()): +def gethostbyname_ex(name, lock=dummy_lock): # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) with lock: hostent = _c.gethostbyname(name) return gethost_common(name, hostent, addr) -def gethostbyaddr(ip, lock=DummyLock()): +def gethostbyaddr(ip, lock=dummy_lock): # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) From noreply at buildbot.pypy.org Fri May 2 01:48:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 01:48:18 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140501234818.CDC081C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71161:1bb8ba9c96df Date: 2014-05-01 19:46 -0400 http://bitbucket.org/pypy/pypy/changeset/1bb8ba9c96df/ Log: cleanup diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -249,9 +249,8 @@ raises(TypeError, ast.Num, 1, 2, lineno=0) def test_issue1680_nonseq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -292,9 
+291,8 @@ assert not hasattr(mod.body[0], 'name') def test_issue1680_seq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -397,9 +395,8 @@ import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ - assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Num_notfullinit(self): import ast import copy @@ -407,7 +404,7 @@ assert num_node.n == 2 assert num_node.lineno == 2 num_node2 = copy.deepcopy(num_node) - + def test_issue1673_Num_fullinit(self): import ast import copy @@ -418,7 +415,7 @@ assert num_node.col_offset == num_node2.col_offset dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Str(self): import ast import copy diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -60,7 +60,6 @@ class AppTestTypeObject: - def test_abstract_methods(self): class X(object): pass @@ -427,8 +426,7 @@ assert f.__call__() == ((), {}) assert f.__call__("hello", "world") == (("hello", "world"), {}) assert f.__call__(5, bla=6) == ((5,), {"bla": 6}) - assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, - "c": 3}) + assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, "c": 3}) def test_multipleinheritance_fail(self): try: @@ -539,7 +537,6 @@ assert ImmutableDoc.__doc__ == 'foo' def test_metaclass_conflict(self): - class T1(type): pass class T2(type): @@ -555,7 +552,7 @@ def test_metaclass_choice(self): events = [] - + class T1(type): def __new__(*args): events.append(args) @@ -577,7 +574,7 @@ assert type(D1) is T1 assert type(C) is T1 assert type(G) is T1 - + def test_descr_typecheck(self): raises(TypeError,type.__dict__['__name__'].__get__,1) raises(TypeError,type.__dict__['__mro__'].__get__,1) @@ 
-806,7 +803,7 @@ z2 = Z2() z2.__class__ = Z1 assert z2.__class__ == Z1 - + class I(int): pass class F(float): @@ -825,13 +822,12 @@ pass i = I() - i2 = I() i.__class__ = I2 i2.__class__ = I assert i.__class__ == I2 assert i2.__class__ == I - + i3 = I3() raises(TypeError, "i3.__class__ = I2") i3.__class__ = I4 From noreply at buildbot.pypy.org Fri May 2 01:48:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 01:48:20 +0200 (CEST) Subject: [pypy-commit] pypy default: fix attribute error message for heap types Message-ID: <20140501234820.0BFDF1C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71162:02389a744344 Date: 2014-05-01 19:44 -0400 http://bitbucket.org/pypy/pypy/changeset/02389a744344/ Log: fix attribute error message for heap types diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -70,6 +70,13 @@ raises(AttributeError, getattr, type, "__abstractmethods__") raises(TypeError, "int.__abstractmethods__ = ('abc', )") + def test_attribute_error(self): + class X(object): + __module__ = 'test' + x = X() + exc = raises(AttributeError, "x.a") + assert str(exc.value) == "'X' object has no attribute 'a'" + def test_call_type(self): assert type(42) is int C = type('C', (object,), {'x': lambda: 42}) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -489,11 +489,12 @@ def get_module_type_name(w_self): space = w_self.space - w_mod = w_self.get_module() - if space.isinstance_w(w_mod, space.w_str): - mod = space.str_w(w_mod) - if mod != '__builtin__': - return '%s.%s' % (mod, w_self.name) + if not w_self.is_heaptype(): + w_mod = w_self.get_module() + if space.isinstance_w(w_mod, space.w_str): + mod = space.str_w(w_mod) + if mod != '__builtin__': + return '%s.%s' % (mod, 
w_self.name) return w_self.name def getname(w_self, space): From noreply at buildbot.pypy.org Fri May 2 01:55:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 01:55:34 +0200 (CEST) Subject: [pypy-commit] pypy default: py3 compat Message-ID: <20140501235534.526741C0A66@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71163:da193f0b119d Date: 2014-05-01 16:55 -0700 http://bitbucket.org/pypy/pypy/changeset/da193f0b119d/ Log: py3 compat diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print() + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: From noreply at buildbot.pypy.org Fri May 2 02:00:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 02:00:18 +0200 (CEST) Subject: [pypy-commit] pypy issue1430: close branch for merging Message-ID: <20140502000018.A8C9F1C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: issue1430 Changeset: r71164:a0b02011d93b Date: 2014-05-01 19:50 -0400 
http://bitbucket.org/pypy/pypy/changeset/a0b02011d93b/ Log: close branch for merging From noreply at buildbot.pypy.org Fri May 2 02:00:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 02:00:20 +0200 (CEST) Subject: [pypy-commit] pypy default: merge branch issue1430 Message-ID: <20140502000020.5D93A1C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71165:9fe91ecfce83 Date: 2014-05-01 19:50 -0400 http://bitbucket.org/pypy/pypy/changeset/9fe91ecfce83/ Log: merge branch issue1430 diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,6 +17,8 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() + from pypy.module._socket.interp_func import State + space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -42,8 +42,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host) + res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -55,8 +56,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
""" + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host) + res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -310,3 +312,10 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) + +class State(object): + def __init__(self, space): + self.netdb_lock = None + + def startup(self, space): + self.netdb_lock = space.allocate_lock() diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -10,6 +10,7 @@ mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) # force computation and record what we wrap module = mod.Module(space, W_Root()) + module.startup(space) for name in module.loaders: seeobj_w.append(module._load_lazily(space, name)) if hasattr(module, 'submodules'): diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -30,7 +30,7 @@ 'stdio.h', 'netdb.h', 'arpa/inet.h', - 'stdint.h', + 'stdint.h', 'errno.h', ) if _HAS_AF_PACKET: @@ -139,7 +139,7 @@ EAI_SOCKTYPE EAI_SYSTEM IPPROTO_AH IPPROTO_BIP IPPROTO_DSTOPTS IPPROTO_EGP IPPROTO_EON IPPROTO_ESP -IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS +IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS IPPROTO_ICMPV6 IPPROTO_IDP IPPROTO_IGMP IPPROTO_IPCOMP IPPROTO_IPIP IPPROTO_IPV4 IPPROTO_IPV6 IPPROTO_MAX IPPROTO_MOBILE IPPROTO_ND IPPROTO_NONE IPPROTO_PIM IPPROTO_PUP IPPROTO_ROUTING IPPROTO_RSVP IPPROTO_TCP IPPROTO_TP @@ -174,7 +174,7 @@ SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM -SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE +SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE SO_ACCEPTCONN SO_BROADCAST SO_DEBUG SO_DONTROUTE SO_ERROR SO_EXCLUSIVEADDRUSE 
SO_KEEPALIVE SO_LINGER SO_OOBINLINE SO_RCVBUF SO_RCVLOWAT SO_RCVTIMEO @@ -286,7 +286,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,6 +18,7 @@ from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rthread import dummy_lock from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET @@ -1124,22 +1125,24 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name): - # XXX use gethostbyname_r() if available, and/or use locks if not +def gethostbyname_ex(name, lock=dummy_lock): + # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - hostent = _c.gethostbyname(name) - return gethost_common(name, hostent, addr) + with lock: + hostent = _c.gethostbyname(name) + return gethost_common(name, hostent, addr) -def gethostbyaddr(ip): - # XXX use gethostbyaddr_r() if available, and/or use locks if not +def gethostbyaddr(ip, lock=dummy_lock): + # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - p, size = addr.lock_in_addr() - try: - hostent = _c.gethostbyaddr(p, size, addr.family) - finally: - addr.unlock() - return gethost_common(ip, hostent, addr) + with lock: + p, size = addr.lock_in_addr() + try: + hostent = _c.gethostbyaddr(p, size, addr.family) + finally: + addr.unlock() + return gethost_common(ip, hostent, addr) def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, diff --git 
a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,6 +3,7 @@ from rpython.rlib.rsocket import * import socket as cpy_socket + def setup_module(mod): rsocket_startup() @@ -61,6 +62,43 @@ py.test.fail("could not find the localhost address in %r" % (address_list,)) +def test_thread_safe_gethostbyname_ex(): + import threading + nthreads = 10 + domain = 'google.com' + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def lookup_name(i): + name, aliases, address_list = gethostbyname_ex(domain, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_name, args=[i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + +def test_thread_safe_gethostbyaddr(): + import threading + nthreads = 10 + ip = '8.8.8.8' + domain = gethostbyaddr(ip)[0] + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def lookup_addr(ip, i): + name, aliases, address_list = gethostbyaddr(ip, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + def test_gethostbyaddr(): try: cpy_socket.gethostbyaddr("::1") From noreply at buildbot.pypy.org Fri May 2 02:00:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 02:00:21 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140502000021.CA0761C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71166:b2df21f53687 Date: 2014-05-01 19:51 -0400 http://bitbucket.org/pypy/pypy/changeset/b2df21f53687/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ 
b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,5 @@ .. startrev: 0524dae88c75 .. branch: reflex-support + +.. branch: issue1430 From noreply at buildbot.pypy.org Fri May 2 02:00:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 02:00:23 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140502000023.2675C1C0A66@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71167:2906656b373b Date: 2014-05-01 19:59 -0400 http://bitbucket.org/pypy/pypy/changeset/2906656b373b/ Log: merge heads diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print() + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: From noreply at buildbot.pypy.org Fri May 2 03:11:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 03:11:16 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140502011116.772391C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71168:c7e7aac750cf 
Date: 2014-05-01 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/c7e7aac750cf/ Log: merge default diff too long, truncating to 2000 out of 10997 lines diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." 
parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = types.ModuleType('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except 
ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -583,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -58,7 +58,6 @@ math mmap operator - oracle parser posix pyexpat diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. 
code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,11 +1,17 @@ ======================================= -PyPy 2.3 - XXXX TODO +PyPy 2.3 - Easier Than Ever ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. -This release also contains several bugfixes and performance improvements. +This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. +`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. + +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ You can download the PyPy 2.3 release here: @@ -17,72 +23,112 @@ Please consider donating more so we can finish those projects! The three projects are: -* Py3k (supporting Python 3.x): the release PyPy3 2.2 is imminent. +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. -* STM (software transactional memory): a preview will be released very soon, - as soon as we fix a few bugs +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs -* NumPy: the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ the work done is included in the PyPy 2.2 release. 
More details below. -.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; +note that the latest cpython is not faster than cpython 2.7.2) due to its integrated tracing JIT compiler. -This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows -32, or ARM (ARMv6 or ARMv7, with VFPv3). +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. -Work on the native Windows 64 is still stalling, we would welcome a volunteer -to handle that. +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. .. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -* Our Garbage Collector is now "incremental". It should avoid almost - all pauses due to a major collection taking place. Previously, it - would pause the program (rarely) to walk all live objects, which - could take arbitrarily long if your process is using a whole lot of - RAM. Now the same work is done in steps. 
This should make PyPy - more responsive, e.g. in games. There are still other pauses, from - the GC and the JIT, but they should be on the order of 5 - milliseconds each. +Bugfixes +-------- -* The JIT counters for hot code were never reset, which meant that a - process running for long enough would eventually JIT-compile more - and more rarely executed code. Not only is it useless to compile - such code, but as more compiled code means more memory used, this - gives the impression of a memory leak. This has been tentatively - fixed by decreasing the counters from time to time. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. -* NumPy has been split: now PyPy only contains the core module, called - ``_numpypy``. The ``numpy`` module itself has been moved to - ``https://bitbucket.org/pypy/numpy`` and ``numpypy`` disappeared. - You need to install NumPy separately with a virtualenv: +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. + +* Generators are faster since they now skip unnecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. + +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix an rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +.. _`HippyVM`: http://www.hippyvm.com + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. 
We have separated rpython from the PyPy python + interpreter, and rpython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Support for objects with __int__ and __float__ methods + +* Tweak support of errno in cpyext (the PyPy implementation of the capi) + + +Numpy +----- +Numpy support has been split into a builtin ``_numpy`` module and a +fork of the numpy code base adapted to pypy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; or directly: ``git clone https://bitbucket.org/pypy/numpy.git``; ``cd numpy``; ``pypy setup.py install``. -* non-inlined calls have less overhead +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the numpy test suite. We still do not support object or unicode ndarrays. -* Things that use ``sys.set_trace`` are now JITted (like coverage) +* speed of iteration in dot() is now within 1.5x of the numpy c + implementation (without BLAS acceleration). Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all Numpy functionality. -* JSON decoding is now very fast (JSON encoding was already very fast) +* Most of the core functionality of nditer has been implemented. -* various buffer copying methods experience speedups (like list-of-ints to - ``int[]`` buffer from cffi) +* A cffi-based ``numpy.random`` module is available as a branch in the numpy + repository, it will be merged soon after this release. -* We finally wrote (hopefully) all the missing ``os.xxx()`` functions, - including ``os.startfile()`` on Windows and a handful of rare ones - on Posix. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. 
Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. -* numpy has a rudimentary C API that cooperates with ``cpyext`` +Cheers +The PyPy Team -Cheers, -Armin Rigo and Maciej Fijalkowski diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. +.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. + Overview -------- diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -152,3 +152,12 @@ .. branch: small-unroll-improvements Improve optimization of small allocation-heavy loops in the JIT + +.. branch: reflex-support + +.. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 + +.. branch: lexer_token_position_class + +.. branch: refactor-buffer-api +Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,12 +3,6 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ba569fe1efdb - -.. branch: small-unroll-improvements -Improve optimiziation of small allocation-heavy loops in the JIT +.. startrev: 0524dae88c75 .. branch: reflex-support - -.. 
branch: refactor-buffer-api -Properly implement old/new buffer API for objects and start work on replacing bufferstr usage diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -351,10 +351,11 @@ limit -= len(self.keyword_names_w) for i in range(len(self.keywords)): if i < limit: - w_key = space.wrap(self.keywords[i].decode('utf-8')) + key = self.keywords[i] + space.setitem_str(w_kwds, key, self.keywords_w[i]) else: w_key = self.keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, self.keywords_w[i]) + space.setitem(w_kwds, w_key, self.keywords_w[i]) return w_args, w_kwds # JIT helper functions @@ -446,10 +447,10 @@ break else: if i < limit: - w_key = space.wrap(keywords[i].decode('utf-8')) + space.setitem_str(w_kwds, keywords[i], keywords_w[i]) else: w_key = keyword_names_w[i - limit] - space.setitem(w_kwds, w_key, keywords_w[i]) + space.setitem(w_kwds, w_key, keywords_w[i]) # # ArgErr family of exceptions raised in case of argument mismatch. 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -349,9 +349,13 @@ """ self._application_traceback = traceback - at specialize.memo() + +class ClearedOpErr: + def __init__(self, space): + self.operr = OperationError(space.w_None, space.w_None) + def get_cleared_operation_error(space): - return OperationError(space.w_None, space.w_None) + return space.fromcache(ClearedOpErr).operr # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string @@ -409,14 +413,14 @@ value = getattr(self, attr) if fmt == 'd': result = str(value).decode('ascii') + elif fmt == 'R': + result = space.unicode_w(space.repr(value)) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': + result = value.getname(space) elif fmt == '8': result = value.decode('utf-8') - elif fmt == 'R': - result = space.unicode_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) - result = value.getname(space) else: result = unicode(value) lst[i + i + 1] = result @@ -457,7 +461,7 @@ %8 - The result of arg.decode('utf-8') %N - The result of w_arg.getname(space) %R - The result of space.unicode_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -205,11 +205,14 @@ def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! """Implements sys.exc_info(). 
Return an OperationError instance or None.""" - frame = self.gettopframe_nohidden() + frame = self.gettopframe() while frame: if frame.last_exception is not None: - return frame.last_exception - frame = self.getnextframe_nohidden(frame) + if (not frame.hide() or + frame.last_exception is + get_cleared_operation_error(self.space)): + return frame.last_exception + frame = frame.f_backref() return None def set_sys_exc_info(self, operror): diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -15,6 +15,7 @@ space.setitem(w_modules, w_main, mainmodule) return mainmodule + def compilecode(space, source, filename, cmd='exec'): w = space.wrap w_code = space.builtin.call( @@ -28,7 +29,7 @@ cmd = 'eval' else: cmd = 'exec' - + try: if space is None: from pypy.objspace.std import StdObjSpace @@ -55,18 +56,22 @@ operationerr.record_interpreter_traceback() raise + def run_string(source, filename=None, space=None): _run_eval_string(source, filename, space, False) + def eval_string(source, filename=None, space=None): return _run_eval_string(source, filename, space, True) + def run_file(filename, space=None): - if __name__=='__main__': + if __name__ == '__main__': print "Running %r with %r" % (filename, space) istring = open(filename).read() run_string(istring, filename, space) + def run_module(module_name, args, space=None): """Implements PEP 338 'Executing modules as scripts', overwriting sys.argv[1:] using `args` and executing the module `module_name`. @@ -89,7 +94,6 @@ return space.call_function(w_run_module, w(module_name), space.w_None, w('__main__'), space.w_True) -# ____________________________________________________________ def run_toplevel(space, f, verbose=False): """Calls f() and handle all OperationErrors. 
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -94,6 +94,7 @@ def setitem(self, obj, key, value): obj[key] = value + setitem_str = setitem def getitem(self, obj, key): return obj[key] diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -393,3 +393,9 @@ # because it's a regular method, and .__objclass__ # differs from .im_class in case the method is # defined in some parent class of l's actual class + + def test_func_closure(self): + x = 2 + def f(): + return x + assert f.__closure__[0].cell_contents is x diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -462,6 +462,7 @@ ) W_Range.typedef.acceptable_as_base_class = False + class W_RangeIterator(W_Root): def __init__(self, space, w_start, w_step, w_len, w_index=None): self.w_start = w_start diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -353,13 +353,14 @@ def test_range_len(self): x = range(33) assert len(x) == 33 - raises(TypeError, range, 33.2) + exc = raises(TypeError, range, 33.2) + assert "integer" in str(exc.value) x = range(33,0,-1) assert len(x) == 33 x = range(33,0) assert len(x) == 0 - raises(TypeError, range, 33, 0.2) - assert len(x) == 0 + exc = raises(TypeError, range, 33, 0.2) + assert "integer" in str(exc.value) x = range(0,33) assert len(x) == 33 x = range(0,33,-1) @@ -486,6 +487,14 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 + co = compile(buffer('1+2'), '?', 'eval') + assert eval(co) == 3 + exc = 
raises(TypeError, compile, chr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, unichr(0), '?', 'eval') + assert str(exc.value) == "compile() expected string without null bytes" + exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(SyntaxError, compile, '"\\xt"', '?', 'eval') diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -1,5 +1,4 @@ class AppTestMap: - def test_trivial_map_one_seq(self): assert list(map(lambda x: x+2, [1, 2, 3, 4])) == [3, 4, 5, 6] @@ -105,6 +104,7 @@ raises(TypeError, map, bool) raises(TypeError, map, 42) + class AppTestZip: def test_one_list(self): assert list(zip([1,2,3])) == [(1,), (2,), (3,)] @@ -124,6 +124,7 @@ def test_repr(self): assert repr(zip([1,2,3], [1,2], [1,2,3])).startswith(' 42 for x in S]) == False + class AppTestMinMax: def test_min(self): assert min(1, 2) == 1 diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -812,7 +812,7 @@ if space.isinstance_w(w_string, space.w_unicode): return space.newtuple([w_string, space.len(w_string)]) - string = space.bytes_w(w_string) + string = space.readbuf_w(w_string).as_str() if len(string) == 0: return space.newtuple([space.wrap(u''), space.wrap(0)]) diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -277,7 +277,7 @@ assert enc == b"a\x00\x00\x00" def test_unicode_internal_decode(self): - import 
sys + import sys, _codecs, array if sys.maxunicode == 65535: # UCS2 build if sys.byteorder == "big": bytes = b"\x00a" @@ -292,6 +292,8 @@ bytes2 = b"\x98\x00\x01\x00" assert bytes2.decode("unicode_internal") == "\U00010098" assert bytes.decode("unicode_internal") == "a" + assert _codecs.unicode_internal_decode(array.array('b', bytes))[0] == u"a" + assert _codecs.unicode_internal_decode(memoryview(bytes))[0] == u"a" def test_raw_unicode_escape(self): import _codecs diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -1,6 +1,6 @@ from rpython.rlib.rstacklet import StackletThread from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef @@ -39,6 +39,7 @@ bottomframe.locals_stack_w[1] = w_callable bottomframe.locals_stack_w[2] = w_args bottomframe.locals_stack_w[3] = w_kwds + bottomframe.last_exception = get_cleared_operation_error(space) self.bottomframe = bottomframe # global_state.origin = self diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py --- a/pypy/module/_continuation/test/test_stacklet.py +++ b/pypy/module/_continuation/test/test_stacklet.py @@ -685,3 +685,17 @@ execfile(self.translated, d) d['set_fast_mode']() d['test_various_depths']() + + def test_exc_info_doesnt_follow_continuations(self): + import sys + from _continuation import continulet + # + def f1(c1): + return sys.exc_info() + # + c1 = continulet(f1) + try: + 1 // 0 + except ZeroDivisionError: + got = c1.switch() + assert got == (None, None, None) diff --git a/pypy/module/_lsprof/interp_lsprof.py 
b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -59,7 +59,7 @@ self.tt, self.it, calls_repr)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsEntry.typedef = TypeDef( '_lsprof.StatsEntry', @@ -86,7 +86,7 @@ frame_repr, self.callcount, self.reccallcount, self.tt, self.it)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsSubEntry.typedef = TypeDef( '_lsprof.SubStatsEntry', @@ -189,53 +189,84 @@ subentry._stop(tt, it) - at jit.elidable_promote() def create_spec_for_method(space, w_function, w_type): - w_function = w_function + class_name = None if isinstance(w_function, Function): name = w_function.name + # try to get the real class that defines the method, + # which is a superclass of the class of the instance + from pypy.objspace.std.typeobject import W_TypeObject # xxx + if isinstance(w_type, W_TypeObject): + w_realclass, _ = space.lookup_in_type_where(w_type, name) + if isinstance(w_realclass, W_TypeObject): + class_name = w_realclass.get_module_type_name() else: name = '?' 
- # try to get the real class that defines the method, - # which is a superclass of the class of the instance - from pypy.objspace.std.typeobject import W_TypeObject # xxx - class_name = w_type.getname(space) # if the rest doesn't work - if isinstance(w_type, W_TypeObject) and name != '?': - w_realclass, _ = space.lookup_in_type_where(w_type, name) - if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() - return u"{method '%s' of '%s' objects}" % (name.decode('utf-8'), + if class_name is None: + class_name = w_type.getname(space) # if the rest doesn't work + return u"" % (name.decode('utf-8'), class_name) - at jit.elidable_promote() def create_spec_for_function(space, w_func): - if w_func.w_module is None: - module = u'' + assert isinstance(w_func, Function) + pre = u'built-in function ' if isinstance(w_func, BuiltinFunction) else u'' + if w_func.w_module is not None: + module = space.unicode_w(w_func.w_module) + if module != u'builtins': + return u'<%s%s.%s>' % (pre, module, w_func.getname(space)) + return '<%s%s>' % (pre, w_func.getname(space)) + + +def create_spec_for_object(space, w_type): + class_name = w_type.getname(space) + return u"<'%s' object>" % (class_name,) + + +class W_DelayedBuiltinStr(W_Root): + # This class should not be seen at app-level, but is useful to + # contain a (w_func, w_type) pair returned by prepare_spec(). + # Turning this pair into a string cannot be done eagerly in + # an @elidable function because of space.str_w(), but it can + # be done lazily when we really want it. 
+ + _immutable_fields_ = ['w_func', 'w_type'] + + def __init__(self, w_func, w_type): + self.w_func = w_func + self.w_type = w_type + self.w_string = None + + def wrap_string(self, space): + if self.w_string is None: + if self.w_type is None: + s = create_spec_for_function(space, self.w_func) + elif self.w_func is None: + s = create_spec_for_object(space, self.w_type) + else: + s = create_spec_for_method(space, self.w_func, self.w_type) + self.w_string = space.wrap(s) + return self.w_string + +W_DelayedBuiltinStr.typedef = TypeDef( + 'DelayedBuiltinStr', + __str__ = interp2app(W_DelayedBuiltinStr.wrap_string), +) + +def returns_code(space, w_frame): + if isinstance(w_frame, W_DelayedBuiltinStr): + return w_frame.wrap_string(space) + return w_frame # actually a PyCode object + + +def prepare_spec(space, w_arg): + if isinstance(w_arg, Method): + return (w_arg.w_function, space.type(w_arg.w_instance)) + elif isinstance(w_arg, Function): + return (w_arg, None) else: - module = space.unicode_w(w_func.w_module) - if module == u'builtins': - module = u'' - else: - module += u'.' 
- pre = u'built-in function ' if isinstance(w_func, BuiltinFunction) else u'' - return u'{%s%s%s}' % (pre, module, w_func.getname(space)) - - - at jit.elidable_promote() -def create_spec_for_object(space, w_obj): - class_name = space.type(w_obj).getname(space) - return u"{'%s' object}" % (class_name,) - - -def create_spec(space, w_arg): - if isinstance(w_arg, Method): - w_type = space.type(w_arg.w_instance) - return create_spec_for_method(space, w_arg.w_function, w_type) - elif isinstance(w_arg, Function): - return create_spec_for_function(space, w_arg) - else: - return create_spec_for_object(space, w_arg) + return (None, space.type(w_arg)) +prepare_spec._always_inline_ = True def lsprof_call(space, w_self, frame, event, w_arg): @@ -248,12 +279,10 @@ w_self._enter_return(code) elif event == 'c_call': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_call(key) + w_self._enter_builtin_call(w_arg) elif event == 'c_return' or event == 'c_exception': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_return(key) + w_self._enter_builtin_return(w_arg) else: # ignore or raise an exception??? 
pass @@ -316,13 +345,14 @@ return entry raise - @jit.elidable - def _get_or_make_builtin_entry(self, key, make=True): + @jit.elidable_promote() + def _get_or_make_builtin_entry(self, w_func, w_type, make): + key = (w_func, w_type) try: return self.builtin_data[key] except KeyError: if make: - entry = ProfilerEntry(self.space.wrap(key)) + entry = ProfilerEntry(W_DelayedBuiltinStr(w_func, w_type)) self.builtin_data[key] = entry return entry raise @@ -346,18 +376,18 @@ context._stop(self, entry) self.current_context = context.previous - def _enter_builtin_call(self, key): - self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key) + def _enter_builtin_call(self, w_arg): + w_func, w_type = prepare_spec(self.space, w_arg) + entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) - def _enter_builtin_return(self, key): + def _enter_builtin_return(self, w_arg): context = self.current_context if context is None: return - self = jit.promote(self) + w_func, w_type = prepare_spec(self.space, w_arg) try: - entry = self._get_or_make_builtin_entry(key, False) + entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: pass else: diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -11,6 +11,48 @@ import _lsprof assert repr(_lsprof.Profiler) == "" + def test_builtins(self): + import _lsprof + prof = _lsprof.Profiler() + lst = [] + prof.enable() + lst.append(len(lst)) + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + "", + ) + for entry in stats: + assert entry.code in expected + + def test_builtins_callers(self): + import _lsprof + prof = _lsprof.Profiler(subcalls=True) + lst = [] + def f1(): + lst.append(len(lst)) + prof.enable(subcalls=True) + f1() + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + ) 
+ by_id = set() + for entry in stats: + if entry.code == f1.func_code: + assert len(entry.calls) == 2 + for subentry in entry.calls: + assert subentry.code in expected + by_id.add(id(subentry.code)) + elif entry.code in expected: + by_id.add(id(entry.code)) + # :-( cProfile.py relies on the id() of the strings... + assert len(by_id) == len(expected) + def test_direct(self): import _lsprof def getticks(): @@ -37,10 +79,8 @@ stats = prof.getstats() entries = {} for entry in stats: - if not hasattr(entry.code, 'co_name'): - print(entry.code) - else: - entries[entry.code.co_name] = entry + assert hasattr(entry.code, 'co_name') + entries[entry.code.co_name] = entry efoo = entries['foo'] assert efoo.callcount == 2 assert efoo.reccallcount == 1 diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -193,7 +193,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.bufferstr_w(w_value) + value = space.str_w(w_value) if start + len(value) != stop: raise OperationError(space.w_ValueError, space.wrap("cannot resize array")) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1095,9 +1095,9 @@ b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -612,7 +612,8 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_RSocket.typedef = TypeDef("socket", + __module__ = "_socket", __doc__ = """\ 
socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -305,6 +305,11 @@ cls.space = space cls.w_udir = space.wrap(str(udir)) + def test_module(self): + import _socket + assert _socket.socket.__name__ == 'socket' + assert _socket.socket.__module__ == '_socket' + def test_ntoa_exception(self): import _socket raises(_socket.error, _socket.inet_ntoa, b"ab") diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -494,6 +494,9 @@ return return self.delitem(space, start, stop) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_add(self, space, w_other): raise NotImplementedError @@ -545,6 +548,7 @@ __getitem__ = interp2app(W_ArrayBase.descr_getitem), __setitem__ = interp2app(W_ArrayBase.descr_setitem), __delitem__ = interp2app(W_ArrayBase.descr_delitem), + __iter__ = interp2app(W_ArrayBase.descr_iter), __add__ = interpindirect2app(W_ArrayBase.descr_add), __iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -681,6 +681,8 @@ for i in a: b.append(i) assert repr(b) == "array('i', [1, 2, 3])" + assert hasattr(b, '__iter__') + assert next(b.__iter__()) == 1 def test_lying_iterable(self): class lier(object): diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -16,7 +16,7 @@ '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', - 'CPPInstance' : 'interp_cppyy.W_CPPInstance', + 'CPPInstanceBase' 
: 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', } @@ -25,6 +25,7 @@ '_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', + 'Template' : 'pythonify.CPPTemplate', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -127,19 +127,18 @@ argc = len(args_w) try: - # Note: argcount is +1 for the class (== w_self) - if argc < 5 or 6 < argc: + if argc < 4 or 5 < argc: raise TypeError("wrong number of arguments") - # second argument must be a name - funcname = space.str_w(args_w[1]) + # first argument must be a name + funcname = space.str_w(args_w[0]) # last (optional) argument is number of parameters npar = 0 - if argc == 6: npar = space.int_w(args_w[5]) + if argc == 5: npar = space.int_w(args_w[4]) - # third argument must be a callable python object - w_callable = args_w[2] + # second argument must be a callable python object + w_callable = args_w[1] if not space.is_true(space.callable(w_callable)): raise TypeError("2nd argument is not a valid python callable") @@ -159,17 +158,21 @@ # so far, so good; leaves on issue: CINT is expecting a wrapper, but # we need the overload that takes a function pointer, which is not in # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, - space.float_w(args_w[3]), space.float_w(args_w[4]), npar) - - from pypy.module.cppyy import interp_cppyy - w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, - do_cast=False, python_owns=True, fresh=True) + newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, + space.float_w(args_w[2]), space.float_w(args_w[3]), npar) + + # w_self is a null-ptr bound as TF1 + from pypy.module.cppyy.interp_cppyy import W_CPPInstance, 
memory_regulator + cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) + cppself._rawobject = newinst + memory_regulator.register(cppself) # tie all the life times to the TF1 instance - space.setattr(w_instance, space.wrap('_callback'), w_callback) + space.setattr(w_self, space.wrap('_callback'), w_callback) - return w_instance + # by definition for __init__ + return None + except (OperationError, TypeError, IndexError), e: newargs_w = args_w[1:] # drop class @@ -312,7 +315,7 @@ # location w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.buffer_w(w_address) + buf = space.getarg_w('s*', w_address) from pypy.module._rawffi import buffer assert isinstance(buf, buffer.RawFFIBuffer) address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) @@ -395,7 +398,7 @@ _method_alias(space, w_pycppclass, "__len__", "GetSize") elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -63,7 +63,7 @@ def get_rawbuffer(space, w_obj): # raw buffer try: - buf = space.buffer_w(w_obj, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass @@ -163,7 +163,7 @@ def to_memory(self, space, w_obj, w_value, offset): # copy the full array (uses byte copy for now) address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_value) # TODO: report if too many items given? 
for i in range(min(self.size*self.typesize, buf.getlength())): address[i] = buf.getitem(i) @@ -204,7 +204,7 @@ # copy only the pointer value rawobject = get_rawobject_nonnull(space, w_obj) byteptr = rffi.cast(rffi.CCHARPP, capi.direct_ptradd(rawobject, offset)) - buf = space.buffer_w(w_value, space.BUF_SIMPLE) + buf = space.getarg_w('s*', w_value) try: byteptr[0] = buf.get_raw_address() except ValueError: diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -155,18 +155,16 @@ the memory_regulator.""" _attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required', - 'args_expected', 'converters', 'executor', '_funcaddr', 'cif_descr', - 'uses_local'] + 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True - def __init__(self, space, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, declaring_scope, method_index, arg_defs, args_required): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.index = method_index self.cppmethod = capi.c_get_method(self.space, self.scope, method_index) self.arg_defs = arg_defs self.args_required = args_required - self.args_expected = len(arg_defs) # Setup of the method dispatch's innards is done lazily, i.e. only when # the method is actually used. 
@@ -176,6 +174,12 @@ self._funcaddr = lltype.nullptr(rffi.VOIDP.TO) self.uses_local = False + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + return cppinstance.get_cppthis(declaring_scope) + def _address_from_local_buffer(self, call_local, idx): if not call_local: return call_local @@ -277,7 +281,7 @@ funcaddr = methgetter(rffi.cast(capi.C_OBJECT, cppthis)) self._funcaddr = rffi.cast(rffi.VOIDP, funcaddr) - nargs = self.args_expected + 1 # +1: cppthis + nargs = len(self.arg_defs) + 1 # +1: cppthis # memory block for CIF description (note: not tracked as the life # time of methods is normally the duration of the application) @@ -335,7 +339,7 @@ # extra cif_descr.abi = clibffi.FFI_DEFAULT_ABI - cif_descr.nargs = self.args_expected + 1 # +1: cppthis + cif_descr.nargs = len(self.arg_defs) + 1 # +1: cppthis res = jit_libffi.jit_ffi_prep_cif(cif_descr) if res != clibffi.FFI_OK: @@ -405,28 +409,29 @@ class CPPFunction(CPPMethod): - """Global (namespaced) function dispatcher. For now, the base class has - all the needed functionality, by allowing the C++ this pointer to be null - in the call. An optimization is expected there, however.""" + """Global (namespaced) function dispatcher.""" _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return capi.C_NULL_OBJECT + def __repr__(self): return "CPPFunction: %s" % self.signature() class CPPTemplatedCall(CPPMethod): - """Method dispatcher that first needs to resolve the template instance. 
- Note that the derivation is from object: the CPPMethod is a data member.""" + """Method dispatcher that first resolves the template instance.""" - _attrs_ = ['space', 'templ_args', 'method'] + _attrs_ = ['space', 'templ_args'] _immutable_ = True - def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required): self.space = space self.templ_args = templ_args # TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here - CPPMethod.__init__(self, space, containing_scope, method_index, arg_defs, args_required) + CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required) def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -450,20 +455,21 @@ class CPPConstructor(CPPMethod): """Method dispatcher that constructs new objects. This method can not have - a fast path, a the allocation of the object is currently left to the - reflection layer only, b/c the C++ class may have an overloaded operator + a fast path, as the allocation of the object is currently left to the + reflection layer only, since the C++ class may have an overloaded operator new, disallowing malloc here.""" _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return rffi.cast(capi.C_OBJECT, declaring_scope.handle) + def call(self, cppthis, args_w): - # TODO: these casts are very, very un-pretty; need to find a way of - # re-using CPPMethod's features w/o these roundabouts - vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - w_result = CPPMethod.call(self, vscope, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - return wrap_cppobject(self.space, newthis, self.scope, - do_cast=False, python_owns=True, fresh=True) + # Note: this does not return a wrapped instance, just a pointer to the + # new instance; the overload must still 
wrap it before returning. Also, + # cppthis is declaring_scope.handle (as per unpack_cppthis(), above). + return CPPMethod.call(self, cppthis, args_w) def __repr__(self): return "CPPConstructor: %s" % self.signature() @@ -495,9 +501,10 @@ _attrs_ = ['space', 'scope', 'functions'] _immutable_fields_ = ['scope', 'functions[*]'] - def __init__(self, space, containing_scope, functions): + def __init__(self, space, declaring_scope, functions): self.space = space - self.scope = containing_scope + self.scope = declaring_scope + assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -510,12 +517,10 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - if cppinstance is not None: - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - else: - cppthis = capi.C_NULL_OBJECT + # instance handling is specific to the function type only, so take it out + # of the loop over function overloads + cppthis = self.functions[0].unpack_cppthis( + self.space, w_cppinstance, self.functions[0].scope) assert lltype.typeOf(cppthis) == capi.C_OBJECT # The following code tries out each of the functions in order. 
If @@ -575,6 +580,39 @@ ) +class W_CPPConstructorOverload(W_CPPOverload): + @jit.elidable_promote() + def is_static(self): + return self.space.w_False + + @jit.elidable_promote() + def unpack_cppthis(self, w_cppinstance): + return rffi.cast(capi.C_OBJECT, self.scope.handle) + + @jit.unroll_safe + @unwrap_spec(args_w='args_w') + def call(self, w_cppinstance, args_w): + w_result = W_CPPOverload.call(self, w_cppinstance, args_w) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if cppinstance is not None: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return w_cppinstance + return wrap_cppobject(self.space, newthis, self.functions[0].scope, + do_cast=False, python_owns=True, fresh=True) + + def __repr__(self): + return "W_CPPConstructorOverload(%s)" % [f.signature() for f in self.functions] + +W_CPPConstructorOverload.typedef = TypeDef( + 'CPPConstructorOverload', + is_static = interp2app(W_CPPConstructorOverload.is_static), + call = interp2app(W_CPPConstructorOverload.call), + signature = interp2app(W_CPPOverload.signature), +) + + class W_CPPBoundMethod(W_Root): _attrs_ = ['cppthis', 'method'] @@ -595,9 +633,9 @@ _attrs_ = ['space', 'scope', 'converter', 'offset'] _immutable_fields = ['scope', 'converter', 'offset'] - def __init__(self, space, containing_scope, type_name, offset): + def __init__(self, space, declaring_scope, type_name, offset): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.converter = converter.get_converter(self.space, type_name, '') self.offset = offset @@ -707,7 +745,10 @@ # create the overload methods from the method sets for pyname, methods in methods_temp.iteritems(): CPPMethodSort(methods).sort() - overload = W_CPPOverload(self.space, self, methods[:]) + if pyname == self.name: + overload = W_CPPConstructorOverload(self.space, self, methods[:]) + else: + overload = 
W_CPPOverload(self.space, self, methods[:]) self.methods[pyname] = overload def full_name(self): @@ -847,14 +888,13 @@ class W_CPPClass(W_CPPScope): - _attrs_ = ['space', 'default_constructor', 'name', 'handle', 'methods', 'datamembers'] - _immutable_fields_ = ['kind', 'default_constructor', 'methods[*]', 'datamembers[*]'] + _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] + _immutable_fields_ = ['kind', 'constructor', 'methods[*]', 'datamembers[*]'] kind = "class" def __init__(self, space, name, opaque_handle): W_CPPScope.__init__(self, space, name, opaque_handle) - self.default_constructor = None def _make_cppfunction(self, pyname, index): num_args = capi.c_method_num_args(self.space, self, index) @@ -866,8 +906,6 @@ arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.space, self, index): cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required) - if args_required == 0: - self.default_constructor = cppfunction elif capi.c_method_is_template(self.space, self, index): templ_args = capi.c_template_args(self.space, self, index) cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required) @@ -895,9 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - if self.default_constructor is not None: - return self.default_constructor.call(capi.C_NULL_OBJECT, []) - raise self.missing_attribute_error("default_constructor") + return self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) @@ -1036,6 +1072,16 @@ raise return None + def instance__init__(self, args_w): + try: + constructor_overload = self.cppclass.get_overload(self.cppclass.name) + constructor_overload.call(self, args_w) + except OperationError, e: + if not e.match(self.space, self.space.w_AttributeError): + raise + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name)) + def 
instance__eq__(self, w_other): # special case: if other is None, compare pointer-style if self.space.is_w(w_other, self.space.w_None): @@ -1118,6 +1164,7 @@ 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), _python_owns = GetSetProperty(W_CPPInstance.fget_python_owns, W_CPPInstance.fset_python_owns), + __init__ = interp2app(W_CPPInstance.instance__init__), __eq__ = interp2app(W_CPPInstance.instance__eq__), __ne__ = interp2app(W_CPPInstance.instance__ne__), __nonzero__ = interp2app(W_CPPInstance.instance__nonzero__), @@ -1141,10 +1188,14 @@ self.objects = rweakref.RWeakValueDictionary(int, W_CPPInstance) def register(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, obj) def unregister(self, obj): + if not obj._rawobject: + return int_address = int(rffi.cast(rffi.LONG, obj._rawobject)) self.objects.set(int_address, None) @@ -1194,7 +1245,7 @@ w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) # try to recycle existing object if this one is not newly created - if not fresh: + if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,33 +1,37 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible # with info from multiple dictionaries and do not need to bother with meta # classes for inheritance. Both are python classes, though, and refactoring # may be in order at some point. 
-class CppyyScopeMeta(type): +class CPPScope(type): def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) -class CppyyNamespaceMeta(CppyyScopeMeta): +class CPPNamespace(CPPScope): def __dir__(cls): return cls._cpp_proxy.__dir__() -class CppyyClassMeta(CppyyScopeMeta): +class CPPClass(CPPScope): pass -# class CppyyClass defined in _init_pythonify() +# class CPPInstance defined in _init_pythonify() -class CppyyTemplateType(object): - def __init__(self, scope, name): - self._scope = scope +class CPPTemplate(object): + def __init__(self, name, scope=None): self._name = name + if scope is None: + self._scope = gbl + else: + self._scope = scope def _arg_to_str(self, arg): if arg == str: @@ -88,7 +92,7 @@ # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) - metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + metans = type(CPPNamespace)(namespace_name+'_meta', (CPPNamespace,), {}) if cppns: d = {"_cpp_proxy" : cppns} @@ -134,16 +138,14 @@ break return tuple(bases) -def make_new(class_name, cppclass): - try: - constructor_overload = cppclass.get_overload(cppclass.type_name) - except AttributeError: - msg = "cannot instantiate abstract class '%s'" % class_name - def __new__(cls, *args): - raise TypeError(msg) - else: - def __new__(cls, *args): - return constructor_overload.call(None, *args) +def make_new(class_name): + def __new__(cls, *args): + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -151,7 
+153,7 @@ # get a list of base classes for class creation bases = [get_pycppclass(base) for base in cppclass.get_base_names()] if not bases: - bases = [CppyyClass,] + bases = [CPPInstance,] else: # it's technically possible that the required class now has been built # if one of the base classes uses it in e.g. a function interface @@ -162,7 +164,7 @@ # create a meta class to allow properties (for static data write access) metabases = [type(base) for base in bases] - metacpp = type(CppyyClassMeta)(class_name+'_meta', _drop_cycles(metabases), {}) + metacpp = type(CPPClass)(class_name+'_meta', _drop_cycles(metabases), {}) # create the python-side C++ class representation def dispatch(self, name, signature): @@ -170,7 +172,7 @@ return types.MethodType(make_method(name, cppol), self, type(self)) d = {"_cpp_proxy" : cppclass, "__dispatch__" : dispatch, - "__new__" : make_new(class_name, cppclass), + "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) @@ -206,7 +208,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(scope, template_name) + return CPPTemplate(template_name, scope) def get_pycppitem(scope, name): @@ -301,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] @@ -418,13 +420,12 @@ # at pypy-c startup, rather than on the "import cppyy" statement import cppyy - # top-level classes - global CppyyClass - class CppyyClass(cppyy.CPPInstance): - __metaclass__ = CppyyClassMeta - - def __init__(self, *args, **kwds): - pass # ignored, for the C++ backend, ctor == __new__ + __init__ + # root of all proxy classes: CPPInstance in pythonify exists to combine the + # CPPClass meta class with the interp-level CPPInstanceBase + global CPPInstance + 
class CPPInstance(cppyy.CPPInstanceBase): + __metaclass__ = CPPClass + pass # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -3,12 +3,28 @@ #include #include +#include +#include #include #include +#include #include #include +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + +// add example01.cxx code +int globalAddOneToInt(int a); + +namespace dummy { +#include "example01.cxx" +#include "datatypes.cxx" +} + +int globalAddOneToInt(int a) { + return dummy::globalAddOneToInt(a); +} /* pseudo-reflection data ------------------------------------------------- */ namespace { @@ -16,113 +32,307 @@ typedef std::map Handles_t; static Handles_t s_handles; +enum EMethodType { kNormal=0, kConstructor=1, kStatic=2 }; + struct Cppyy_PseudoMethodInfo { Cppyy_PseudoMethodInfo(const std::string& name, const std::vector& argtypes, - const std::string& returntype) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} + const std::string& returntype, + EMethodType mtype = kNormal) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; std::string m_returntype; + EMethodType m_type; +}; + +struct Cppyy_PseudoDatambrInfo { + Cppyy_PseudoDatambrInfo(const std::string& name, + const std::string& type, + size_t offset, bool isstatic) : + m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} + + std::string m_name; + std::string m_type; + size_t m_offset; + bool m_isstatic; }; struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} - Cppyy_PseudoClassInfo(const std::vector& methods) : - m_methods(methods ) {} + Cppyy_PseudoClassInfo(const std::vector& methods, + long method_offset, + const std::vector& data) : + m_methods(methods), m_method_offset(method_offset), 
m_datambrs(data) {} std::vector m_methods; + long m_method_offset; + std::vector m_datambrs; }; typedef std::map Scopes_t; static Scopes_t s_scopes; -class PseudoExample01 { -public: - PseudoExample01() : m_somedata(-99) {} - PseudoExample01(int a) : m_somedata(a) {} - PseudoExample01(const PseudoExample01& e) : m_somedata(e.m_somedata) {} - PseudoExample01& operator=(const PseudoExample01& e) { - if (this != &e) m_somedata = e.m_somedata; - return *this; - } - virtual ~PseudoExample01() {} +static std::map s_methods; -public: - int m_somedata; -}; +#define PUBLIC_CPPYY_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ + offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname, argtypes, #dmtype)); \ + s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + argtypes.push_back(#dmtype); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname, argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + argtypes.clear(); \ + argtypes.push_back("const "#dmtype"&"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname"_c", argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ -static int example01_last_static_method = 0; -static int example01_last_constructor = 0; +#define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ + PUBLIC_CPPYY_DATA(dmname, dmtype); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + 
"get_"#dmname"_array2", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ + PUBLIC_CPPYY_DATA2(dmname, dmtype); \ + argtypes.push_back(#dmtype"*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + argtypes.clear(); argtypes.push_back("void*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_void_array_"#key, argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + +#define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ + (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static long s_scope_id = 0; + static long s_scope_id = 0; + static long s_method_id = 0; + + { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // ( 0) static double staticAddToDouble(double a) + // static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double", kStatic)); + s_methods["static_example01::staticAddToDouble_double"] = s_method_id++; - // ( 1) static int staticAddOneToInt(int a) - // ( 2) static int staticAddOneToInt(int a, int b) + // static int staticAddOneToInt(int a) + // static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int"] = s_method_id++; 
argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int_int"] = s_method_id++; - // ( 3) static int staticAtoi(const char* str) + // static int staticAtoi(const char* str) argtypes.clear(); argtypes.push_back("const char*"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int", kStatic)); + s_methods["static_example01::staticAtoi_cchar*"] = s_method_id++; - // ( 4) static char* staticStrcpy(const char* strin) - methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // static char* staticStrcpy(const char* strin) + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*", kStatic)); + s_methods["static_example01::staticStrcpy_cchar*"] = s_method_id++; - // ( 5) static void staticSetPayload(payload* p, double d) - // ( 6) static payload* staticCyclePayload(payload* p, double d) - // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + // static void staticSetPayload(payload* p, double d) + // static payload* staticCyclePayload(payload* p, double d) + // static payload staticCopyCyclePayload(payload* p, double d) argtypes.clear(); argtypes.push_back("payload*"); argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void", kStatic)); + s_methods["static_example01::staticSetPayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*", kStatic)); + 
s_methods["static_example01::staticCyclePayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload", kStatic)); + s_methods["static_example01::staticCopyCyclePayload_payload*_double"] = s_method_id++; - // ( 8) static int getCount() - // ( 9) static void setCount(int) + // static int getCount() + // static void setCount(int) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int", kStatic)); + s_methods["static_example01::getCount"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void", kStatic)); + s_methods["static_example01::setCount_int"] = s_method_id++; - // cut-off is used in cppyy_is_static - example01_last_static_method = methods.size(); + // example01() + // example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01"] = s_method_id++; + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01_int"] = s_method_id++; - // (10) example01() - // (11) example01(int a) + // int addDataToInt(int a) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + s_methods["example01::addDataToInt_int"] = s_method_id++; - // cut-off is used in cppyy_is_constructor - example01_last_constructor = methods.size(); + // int addDataToIntConstRef(const int& a) + argtypes.clear(); + argtypes.push_back("const int&"); + 
methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + s_methods["example01::addDataToIntConstRef_cint&"] = s_method_id++; - // (12) double addDataToDouble(double a) + // int overloadedAddDataToInt(int a, int b) + argtypes.clear(); + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int"] = s_method_id++; + + // int overloadedAddDataToInt(int a) + // int overloadedAddDataToInt(int a, int b, int c) + argtypes.clear(); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int"] = s_method_id++; + argtypes.push_back("int"); + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int_int"] = s_method_id++; + + // double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + s_methods["example01::addDataToDouble_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods); + // int addDataToAtoi(const char* str) + // char* addToStringValue(const char* str) + argtypes.clear(); + argtypes.push_back("const char*"); + methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + s_methods["example01::addDataToAtoi_cchar*"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + s_methods["example01::addToStringValue_cchar*"] = s_method_id++; + + // void setPayload(payload* p) + // payload* cyclePayload(payload* p) + // payload copyCyclePayload(payload* p) + argtypes.clear(); + argtypes.push_back("payload*"); + methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + 
s_methods["example01::setPayload_payload*"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + s_methods["example01::cyclePayload_payload*"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; + + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; - // -- class example01 + } // -- class example01 + + //==================================================================== + + { // class payload -- + s_handles["payload"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // payload(double d = 0.) + std::vector argtypes; + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor", kConstructor)); + s_methods["payload::payload_double"] = s_method_id++; + + // double getData() + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + s_methods["payload::getData"] = s_method_id++; + + // void setData(double d) + argtypes.clear(); + argtypes.push_back("double"); + methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + s_methods["payload::setData_double"] = s_method_id++; + + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class payload + + //==================================================================== + + { // class cppyy_test_data -- + s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + + std::vector methods; + + // cppyy_test_data() + std::vector argtypes; + methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); + s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + + methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); 
+ s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + + std::vector data; + PUBLIC_CPPYY_DATA2(bool, bool); + PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (uchar, unsigned char); + PUBLIC_CPPYY_DATA3(short, short, h); + PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); + PUBLIC_CPPYY_DATA3(int, int, i); + PUBLIC_CPPYY_DATA3(uint, unsigned int, I); + PUBLIC_CPPYY_DATA3(long, long, l); + PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); + PUBLIC_CPPYY_DATA (llong, long long); + PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA3(float, float, f); + PUBLIC_CPPYY_DATA3(double, double, d); + PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (voidp, void*); + + PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); From noreply at buildbot.pypy.org Fri May 2 03:11:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 03:11:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k: seqiter is good enough for default so kill our AraryIterator Message-ID: <20140502011117.C1F4A1C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71169:2b304812a6bc Date: 2014-05-01 17:34 -0700 http://bitbucket.org/pypy/pypy/changeset/2b304812a6bc/ Log: seqiter is good enough for default so kill our AraryIterator diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -517,9 +517,6 @@ # Misc methods - def descr_iter(self, space): - return space.wrap(ArrayIterator(self)) - def descr_repr(self, space): if self.len == 0: return space.wrap("array('%s')" % self.typecode) @@ -557,7 +554,6 @@ __radd__ = interp2app(W_ArrayBase.descr_radd), __rmul__ = interp2app(W_ArrayBase.descr_rmul), - __iter__ = interp2app(W_ArrayBase.descr_iter), __repr__ = interp2app(W_ArrayBase.descr_repr), itemsize = GetSetProperty(descr_itemsize), @@ -665,28 +661,6 @@ return 
self.array._charbuf_start() -class ArrayIterator(W_Root): - def __init__(self, array): - self.index = 0 - self.array = array - - def iter_w(self, space): - return space.wrap(self) - - def next_w(self, space): - if self.index < self.array.len: - w_value = self.array.w_getitem(space, self.index) - self.index += 1 - return w_value - raise OperationError(space.w_StopIteration, space.w_None) - -ArrayIterator.typedef = TypeDef( - 'arrayiterator', - __iter__ = interp2app(ArrayIterator.iter_w), - __next__ = interp2app(ArrayIterator.next_w), - ) - - def make_array(mytype): W_ArrayBase = globals()['W_ArrayBase'] From noreply at buildbot.pypy.org Fri May 2 03:11:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 03:11:19 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix ztranslation Message-ID: <20140502011119.1D4081C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71170:b9354e0d0dcd Date: 2014-05-01 17:52 -0700 http://bitbucket.org/pypy/pypy/changeset/b9354e0d0dcd/ Log: fix ztranslation diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -65,7 +65,7 @@ pass class W_MyType(W_MyObject): - name = "foobar" + name = u"foobar" def __init__(self): self.mro_w = [w_some_obj(), w_some_obj()] From noreply at buildbot.pypy.org Fri May 2 03:19:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 03:19:29 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: adjust get_module to work like cpython Message-ID: <20140502011929.C337F1C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71171:78d387a3ea28 Date: 2014-05-01 20:39 -0400 http://bitbucket.org/pypy/pypy/changeset/78d387a3ea28/ Log: adjust get_module to work like cpython diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py 
@@ -473,19 +473,17 @@ return res return _issubtype(w_self, w_type) - def get_module(w_self): - space = w_self.space - if w_self.is_heaptype() and w_self.getdictvalue(space, '__module__') is not None: - return w_self.getdictvalue(space, '__module__') + def get_module(self): + space = self.space + if self.is_heaptype(): + return self.getdictvalue(space, '__module__') else: - # for non-heap types, CPython checks for a module.name in the - # type name. That's a hack, so we're allowed to use a different - # hack... - if ('__module__' in w_self.dict_w and - space.isinstance_w(w_self.getdictvalue(space, '__module__'), - space.w_str)): - return w_self.getdictvalue(space, '__module__') - return space.wrap('__builtin__') + dot = self.name.find('.') + if dot != -1: + mod = self.name[:dot] + else: + mod = "__builtin__" + return space.wrap(mod) def get_module_type_name(w_self): space = w_self.space From noreply at buildbot.pypy.org Fri May 2 03:19:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 03:19:31 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: adjust getname to work like cpython Message-ID: <20140502011931.03FA81C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71172:5628f63f92cd Date: 2014-05-01 20:58 -0400 http://bitbucket.org/pypy/pypy/changeset/5628f63f92cd/ Log: adjust getname to work like cpython diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -495,11 +495,15 @@ return '%s.%s' % (mod, w_self.name) return w_self.name - def getname(w_self, space): - name = w_self.name - if name is None: - name = '?' 
- return name + def getname(self, space): + if self.is_heaptype(): + return self.name + else: + dot = self.name.find('.') + if dot != -1: + return self.name[dot+1:] + else: + return self.name def add_subclass(w_self, w_subclass): space = w_self.space From noreply at buildbot.pypy.org Fri May 2 03:19:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 03:19:32 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: kill get_module_type_name, fix some cases Message-ID: <20140502011932.31A8D1C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71173:2e04ced7667d Date: 2014-05-01 21:13 -0400 http://bitbucket.org/pypy/pypy/changeset/2e04ced7667d/ Log: kill get_module_type_name, fix some cases diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -363,7 +363,7 @@ if fmt == 'R': result = space.str_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name elif fmt == 'N': result = value.getname(space) else: @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' 
if class_name is None: @@ -440,7 +440,7 @@ return space.wrap(p) W_Profiler.typedef = TypeDef( - 'Profiler', + '_lsprof.Profiler', __module__ = '_lsprof', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -488,7 +488,7 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), __module__ = 'array', diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -244,7 +244,7 @@ for k, v in kwargs.items(): kwargs[k] = interp2app(v.__get__(None, realbase)) W_Exc.typedef = TypeDef( - name, + 'exceptions.' + name, base.typedef, __doc__ = W_Exc.__doc__, __module__ = 'exceptions', diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -73,9 +73,6 @@ def get_module(self): return w_some_obj() - def get_module_type_name(self): - return self.name - def w_some_obj(): if NonConstant(False): return W_Root() diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -485,16 +485,6 @@ mod = "__builtin__" return space.wrap(mod) - def get_module_type_name(w_self): - space = w_self.space - if not w_self.is_heaptype(): - w_mod = w_self.get_module() - if space.isinstance_w(w_mod, space.w_str): - mod = space.str_w(w_mod) - if mod != '__builtin__': - return '%s.%s' % (mod, w_self.name) - return w_self.name - def getname(self, space): if self.is_heaptype(): return self.name @@ -1113,7 +1103,7 @@ else: kind = 'class' if mod is not None and mod != '__builtin__': - return space.wrap("<%s '%s.%s'>" % (kind, mod, 
w_obj.name)) + return space.wrap("<%s '%s.%s'>" % (kind, mod, w_obj.getname(space))) else: return space.wrap("<%s '%s'>" % (kind, w_obj.name)) From noreply at buildbot.pypy.org Fri May 2 03:19:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 03:19:33 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: these pass again Message-ID: <20140502011933.533281C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71174:74bf9b0ee01f Date: 2014-05-01 21:18 -0400 http://bitbucket.org/pypy/pypy/changeset/74bf9b0ee01f/ Log: these pass again diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -223,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ -407,7 +407,7 @@ def test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n From noreply at buildbot.pypy.org Fri May 2 03:33:47 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 03:33:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix translation Message-ID: <20140502013347.769E31C00B9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey 
Branch: py3k Changeset: r71175:f81c9ca6cbc2 Date: 2014-05-01 18:33 -0700 http://bitbucket.org/pypy/pypy/changeset/f81c9ca6cbc2/ Log: fix translation diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -215,7 +215,7 @@ module = space.unicode_w(w_func.w_module) if module != u'builtins': return u'<%s%s.%s>' % (pre, module, w_func.getname(space)) - return '<%s%s>' % (pre, w_func.getname(space)) + return u'<%s%s>' % (pre, w_func.getname(space)) def create_spec_for_object(space, w_type): From noreply at buildbot.pypy.org Fri May 2 03:36:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 03:36:42 +0200 (CEST) Subject: [pypy-commit] pypy default: check for null bytes when setting __name__ Message-ID: <20140502013642.F06541C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71176:c7cd23b6b70f Date: 2014-05-01 21:36 -0400 http://bitbucket.org/pypy/pypy/changeset/c7cd23b6b70f/ Log: check for null bytes when setting __name__ diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -885,6 +885,12 @@ Abc.__name__ = 'Def' assert Abc.__name__ == 'Def' raises(TypeError, "Abc.__name__ = 42") + try: + Abc.__name__ = 'G\x00hi' + except ValueError as e: + assert str(e) == "__name__ must not contain null bytes" + else: + assert False def test_compare(self): class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -634,7 +634,10 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise oefmt(space.w_TypeError, "can't set %N.__name__", w_type) - w_type.name = space.str_w(w_value) + name = space.str_w(w_value) + if '\x00' in name: + raise 
oefmt(space.w_ValueError, "__name__ must not contain null bytes") + w_type.name = name def descr_get__mro__(space, w_type): w_type = _check(space, w_type) From noreply at buildbot.pypy.org Fri May 2 05:48:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 05:48:54 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: adjust more Message-ID: <20140502034854.C0EE11C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71177:72bff2dbc7f4 Date: 2014-05-01 21:32 -0400 http://bitbucket.org/pypy/pypy/changeset/72bff2dbc7f4/ Log: adjust more diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,8 +600,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -620,7 +620,7 @@ def descr_get__name__(space, w_type): w_type = _check(space, w_type) - return space.wrap(w_type.name) + return space.wrap(w_type.getname(space)) def descr_set__name__(space, w_type, w_value): w_type = _check(space, w_type) From noreply at buildbot.pypy.org Fri May 2 05:48:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 05:48:56 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: merge default Message-ID: <20140502034856.0E35E1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71178:df86bfb8787d Date: 2014-05-01 21:37 -0400 http://bitbucket.org/pypy/pypy/changeset/df86bfb8787d/ Log: merge default diff --git a/pypy/objspace/std/test/test_typeobject.py 
b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -885,6 +885,12 @@ Abc.__name__ = 'Def' assert Abc.__name__ == 'Def' raises(TypeError, "Abc.__name__ = 42") + try: + Abc.__name__ = 'G\x00hi' + except ValueError as e: + assert str(e) == "__name__ must not contain null bytes" + else: + assert False def test_compare(self): class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -626,7 +626,10 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise oefmt(space.w_TypeError, "can't set %N.__name__", w_type) - w_type.name = space.str_w(w_value) + name = space.str_w(w_value) + if '\x00' in name: + raise oefmt(space.w_ValueError, "__name__ must not contain null bytes") + w_type.name = name def descr_get__mro__(space, w_type): w_type = _check(space, w_type) From noreply at buildbot.pypy.org Fri May 2 05:48:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 05:48:57 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix _ast behavior Message-ID: <20140502034857.302571C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71179:4d193ee033eb Date: 2014-05-01 22:02 -0400 http://bitbucket.org/pypy/pypy/changeset/4d193ee033eb/ Log: fix _ast behavior diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = 
typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -669,10 +669,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,9 +20,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import _HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -997,6 +997,7 @@ w_self.weakrefable = w_self.instancetypedef.weakrefable w_self.w_doc = w_self.space.wrap(w_self.instancetypedef.doc) ensure_common_attributes(w_self) + w_self.flag_heaptype = '__module__' in w_self.instancetypedef.rawdict def ensure_common_attributes(w_self): ensure_static_new(w_self) From noreply at buildbot.pypy.org Fri May 2 05:48:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 05:48:58 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: no longer necessary Message-ID: <20140502034858.4878F1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71180:e9280554e26d Date: 2014-05-01 22:19 -0400 
http://bitbucket.org/pypy/pypy/changeset/e9280554e26d/ Log: no longer necessary diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -490,7 +490,6 @@ W_ArrayBase.typedef = TypeDef( 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -247,7 +247,6 @@ 'exceptions.' + name, base.typedef, __doc__ = W_Exc.__doc__, - __module__ = 'exceptions', **kwargs ) W_Exc.typedef.applevel_subclasses_base = realbase From noreply at buildbot.pypy.org Fri May 2 05:48:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 05:48:59 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: different approach for making _ast classes heaptypes Message-ID: <20140502034859.7C0F01C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71181:f84ebaac41fd Date: 2014-05-01 23:48 -0400 http://bitbucket.org/pypy/pypy/changeset/f84ebaac41fd/ Log: different approach for making _ast classes heaptypes diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2803,6 +2803,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2850,6 +2851,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2897,6 +2899,7 @@ __new__=interp2app(get_AST_new(Interactive)), 
__init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -2950,6 +2953,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2997,6 +3001,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -3062,6 +3067,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3190,6 +3196,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), __init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3314,6 +3321,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3367,6 +3375,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3414,6 +3423,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3491,6 +3501,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3604,6 +3615,7 @@ __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) 
+AugAssign.typedef.heaptype = True def Print_get_dest(space, w_self): if w_self.w_dict is not None: @@ -3710,6 +3722,7 @@ __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) +Print.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3841,6 +3854,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -3942,6 +3956,7 @@ __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4043,6 +4058,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ -4150,6 +4166,7 @@ __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_type(space, w_self): if w_self.w_dict is not None: @@ -4263,6 +4280,7 @@ __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4358,6 +4376,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4429,6 +4448,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4512,6 +4532,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4559,6 +4580,7 @@ __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) 
+Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4667,6 +4689,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Exec_get_body(space, w_self): if w_self.w_dict is not None: @@ -4780,6 +4803,7 @@ __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) +Exec.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4827,6 +4851,7 @@ __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4880,6 +4905,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ -4897,6 +4923,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Break, w_self) @@ -4914,6 +4941,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4931,6 +4959,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4996,6 +5025,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5073,6 +5103,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True def BinOp_get_left(space, w_self): 
if w_self.w_dict is not None: @@ -5186,6 +5217,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5269,6 +5301,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5350,6 +5383,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) +Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5463,6 +5497,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5534,6 +5569,7 @@ __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) +Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5581,6 +5617,7 @@ __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5658,6 +5695,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5735,6 +5773,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5842,6 +5881,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5919,6 +5959,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = True def Yield_get_value(space, 
w_self): if w_self.w_dict is not None: @@ -5972,6 +6013,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6073,6 +6115,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6234,6 +6277,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) +Call.typedef.heaptype = True def Repr_get_value(space, w_self): if w_self.w_dict is not None: @@ -6287,6 +6331,7 @@ __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) +Repr.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6339,6 +6384,7 @@ __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) +Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6391,6 +6437,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6503,6 +6550,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6616,6 +6664,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6698,6 +6747,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6775,6 +6825,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ 
-6852,6 +6903,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -6904,6 +6956,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -6911,6 +6964,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True _Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -6918,6 +6972,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -6925,6 +6980,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True _Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -6932,6 +6988,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -6939,6 +6996,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -6946,6 +7004,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -6953,6 +7012,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -6960,6 +7020,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6977,6 +7038,7 @@ 
__new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7090,6 +7152,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7137,6 +7200,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, w_self): if w_self.w_dict is not None: @@ -7190,6 +7254,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7197,6 +7262,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True _And.typedef = typedef.TypeDef("And", boolop.typedef, @@ -7204,6 +7270,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7211,6 +7278,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7218,6 +7286,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7225,6 +7294,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7232,6 +7302,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7239,6 +7310,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True _Div.typedef = 
typedef.TypeDef("Div", operator.typedef, @@ -7246,6 +7318,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7253,6 +7326,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7260,6 +7334,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = typedef.TypeDef("LShift", operator.typedef, @@ -7267,6 +7342,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7274,6 +7350,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = typedef.TypeDef("BitOr", operator.typedef, @@ -7281,6 +7358,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7288,6 +7366,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7295,6 +7374,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7302,6 +7382,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7309,6 +7390,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7316,6 +7398,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Invert)), ) 
+_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7323,6 +7406,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7330,6 +7414,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7337,6 +7422,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), ) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7344,6 +7430,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7351,6 +7438,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype = True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7358,6 +7446,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7365,6 +7454,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7372,6 +7462,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7379,6 +7470,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7386,6 +7478,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7393,6 +7486,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = True _IsNot.typedef = 
typedef.TypeDef("IsNot", cmpop.typedef, @@ -7400,6 +7494,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7407,6 +7502,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7414,6 +7510,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, w_self): if w_self.w_dict is not None: @@ -7521,6 +7618,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7586,6 +7684,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7693,6 +7792,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -7828,6 +7928,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -7910,6 +8011,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -7994,4 +8096,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py 
b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -352,6 +352,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -997,7 +997,7 @@ w_self.weakrefable = w_self.instancetypedef.weakrefable w_self.w_doc = w_self.space.wrap(w_self.instancetypedef.doc) ensure_common_attributes(w_self) - w_self.flag_heaptype = '__module__' in w_self.instancetypedef.rawdict + w_self.flag_heaptype = w_self.instancetypedef.heaptype def ensure_common_attributes(w_self): ensure_static_new(w_self) From noreply at buildbot.pypy.org Fri May 2 08:03:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:03 +0200 (CEST) Subject: [pypy-commit] pypy default: fix numpy.nditer constructor Message-ID: <20140502060303.D68C91C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71182:497fe4c88cd9 Date: 2014-05-02 00:39 -0400 http://bitbucket.org/pypy/pypy/changeset/497fe4c88cd9/ Log: fix numpy.nditer constructor diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 
'nditer.nditer', + 'nditer': 'nditer.W_NDIter', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -492,13 +492,15 @@ w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) -def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): +def descr__new__(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, + w_casting, w_op_axes, w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef( - 'nditer', +W_NDIter.typedef = TypeDef('nditer', + __module__ = 'numpy', + __new__ = interp2app(descr__new__), + __iter__ = interp2app(W_NDIter.descr_iter), __getitem__ = interp2app(W_NDIter.descr_getitem), __setitem__ = interp2app(W_NDIter.descr_setitem), @@ -530,3 +532,4 @@ shape = GetSetProperty(W_NDIter.descr_get_shape), value = GetSetProperty(W_NDIter.descr_get_value), ) +W_NDIter.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -3,6 +3,19 @@ class AppTestNDIter(BaseNumpyAppTest): + def test_type(self): + import numpy as np + assert type(np.nditer) is type + assert np.nditer.__name__ == 'nditer' + assert np.nditer.__module__ == 'numpy' + try: + class Sub(np.nditer): + pass + except TypeError as e: + assert "not an acceptable base" in str(e) + else: + assert False + def test_basic(self): from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) From noreply 
at buildbot.pypy.org Fri May 2 08:03:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:05 +0200 (CEST) Subject: [pypy-commit] pypy default: fix numpy dtype name attribute Message-ID: <20140502060305.0D8871C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71183:0f75ad4d14ce Date: 2014-05-02 01:00 -0400 http://bitbucket.org/pypy/pypy/changeset/0f75ad4d14ce/ Log: fix numpy dtype name attribute diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -131,12 +131,13 @@ return dtype def get_name(self): - return self.w_box_type.name + name = self.w_box_type.name + if name.endswith('_'): + name = name[:-1] + return name def descr_get_name(self, space): name = self.get_name() - if name[-1] == '_': - name = name[:-1] if self.is_flexible() and self.elsize != 0: return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) @@ -819,7 +820,7 @@ w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { - NPY.BOOL: ['bool', 'bool8'], + NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], NPY.UBYTE: ['ubyte'], NPY.SHORT: ['short'], @@ -834,8 +835,8 @@ NPY.CFLOAT: ['csingle'], NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], - NPY.STRING: ['string', 'str'], - NPY.UNICODE: ['unicode'], + NPY.STRING: ['string_', 'str'], + NPY.UNICODE: ['unicode_'], } self.alternate_constructors = { NPY.BOOL: [space.w_bool], diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -47,6 +47,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d + assert dtype('bool_') is d assert dtype('|b1') is d b = '>' if sys.byteorder == 'little' else '<' assert dtype(b + 'i4') is not dtype(b + 'i4') @@ -63,10 +64,12 @@ 
assert dtype(int).names is None assert dtype(int).hasobject is False assert dtype(int).subdtype is None + assert dtype(str) is dtype('string') is dtype('string_') + assert dtype(unicode) is dtype('unicode') is dtype('unicode_') assert dtype(None) is dtype(float) - for d in [dtype('i4')]: + for d in [dtype('i4'), dtype('bool')]: for key in ["d[2]", "d['z']", "d[None]"]: exc = raises(KeyError, key) assert exc.value[0] == "There are no fields in dtype %s." % str(d) From noreply at buildbot.pypy.org Fri May 2 08:03:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:06 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: merge default Message-ID: <20140502060306.3C3441C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71184:637d5d79bc5f Date: 2014-05-02 01:03 -0400 http://bitbucket.org/pypy/pypy/changeset/637d5d79bc5f/ Log: merge default diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'nditer.nditer', + 'nditer': 'nditer.W_NDIter', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -131,12 +131,13 @@ return dtype def get_name(self): - return self.w_box_type.name + name = self.w_box_type.name + if name.endswith('_'): + name = name[:-1] + return name def descr_get_name(self, space): name = self.get_name() - if name[-1] == '_': - name = name[:-1] if self.is_flexible() and self.elsize != 0: return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) @@ -819,7 +820,7 @@ 
w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { - NPY.BOOL: ['bool', 'bool8'], + NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], NPY.UBYTE: ['ubyte'], NPY.SHORT: ['short'], @@ -834,8 +835,8 @@ NPY.CFLOAT: ['csingle'], NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], - NPY.STRING: ['string', 'str'], - NPY.UNICODE: ['unicode'], + NPY.STRING: ['string_', 'str'], + NPY.UNICODE: ['unicode_'], } self.alternate_constructors = { NPY.BOOL: [space.w_bool], diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -492,13 +492,15 @@ w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) -def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): +def descr__new__(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, + w_casting, w_op_axes, w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef( - 'nditer', +W_NDIter.typedef = TypeDef('nditer', + __module__ = 'numpy', + __new__ = interp2app(descr__new__), + __iter__ = interp2app(W_NDIter.descr_iter), __getitem__ = interp2app(W_NDIter.descr_getitem), __setitem__ = interp2app(W_NDIter.descr_setitem), @@ -530,3 +532,4 @@ shape = GetSetProperty(W_NDIter.descr_get_shape), value = GetSetProperty(W_NDIter.descr_get_value), ) +W_NDIter.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -47,6 +47,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d 
+ assert dtype('bool_') is d assert dtype('|b1') is d b = '>' if sys.byteorder == 'little' else '<' assert dtype(b + 'i4') is not dtype(b + 'i4') @@ -63,10 +64,12 @@ assert dtype(int).names is None assert dtype(int).hasobject is False assert dtype(int).subdtype is None + assert dtype(str) is dtype('string') is dtype('string_') + assert dtype(unicode) is dtype('unicode') is dtype('unicode_') assert dtype(None) is dtype(float) - for d in [dtype('i4')]: + for d in [dtype('i4'), dtype('bool')]: for key in ["d[2]", "d['z']", "d[None]"]: exc = raises(KeyError, key) assert exc.value[0] == "There are no fields in dtype %s." % str(d) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -3,6 +3,19 @@ class AppTestNDIter(BaseNumpyAppTest): + def test_type(self): + import numpy as np + assert type(np.nditer) is type + assert np.nditer.__name__ == 'nditer' + assert np.nditer.__module__ == 'numpy' + try: + class Sub(np.nditer): + pass + except TypeError as e: + assert "not an acceptable base" in str(e) + else: + assert False + def test_basic(self): from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) From noreply at buildbot.pypy.org Fri May 2 08:03:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:07 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix numpy typedefs Message-ID: <20140502060307.6F7BF1C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71185:2ae1301ce7c5 Date: 2014-05-02 01:02 -0400 http://bitbucket.org/pypy/pypy/changeset/2ae1301ce7c5/ Log: fix numpy typedefs diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -546,9 +546,7 @@ return W_UnicodeBox(arr, 0, arr.dtype) -W_GenericBox.typedef = TypeDef("generic", - __module__ = 
"numpy", - +W_GenericBox.typedef = TypeDef("numpy.generic", __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), @@ -639,181 +637,151 @@ flags = GetSetProperty(W_GenericBox.descr_get_flags), ) -W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpy", +W_BoolBox.typedef = TypeDef("numpy.bool_", W_GenericBox.typedef, __new__ = interp2app(W_BoolBox.descr__new__.im_func), __index__ = interp2app(W_BoolBox.descr_index), __reduce__ = interp2app(W_BoolBox.descr_reduce), ) -W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpy", +W_NumberBox.typedef = TypeDef("numpy.number", W_GenericBox.typedef, ) -W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpy", +W_IntegerBox.typedef = TypeDef("numpy.integer", W_NumberBox.typedef, ) -W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_SignedIntegerBox.typedef = TypeDef("numpy.signedinteger", W_IntegerBox.typedef, ) -W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_UnsignedIntegerBox.typedef = TypeDef("numpy.unsignedinteger", W_IntegerBox.typedef, ) -W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int8Box.typedef = TypeDef("numpy.int8", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int8Box.descr__new__.im_func), __index__ = interp2app(W_Int8Box.descr_index), __reduce__ = interp2app(W_Int8Box.descr_reduce), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt8Box.typedef = TypeDef("numpy.uint8", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt8Box.descr__new__.im_func), __index__ = interp2app(W_UInt8Box.descr_index), __reduce__ = interp2app(W_UInt8Box.descr_reduce), ) -W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int16Box.typedef = 
TypeDef("numpy.int16", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int16Box.descr__new__.im_func), __index__ = interp2app(W_Int16Box.descr_index), __reduce__ = interp2app(W_Int16Box.descr_reduce), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt16Box.typedef = TypeDef("numpy.uint16", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt16Box.descr__new__.im_func), __index__ = interp2app(W_UInt16Box.descr_index), __reduce__ = interp2app(W_UInt16Box.descr_reduce), ) -W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, - __module__ = "numpy", +W_Int32Box.typedef = TypeDef("numpy.int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __new__ = interp2app(W_Int32Box.descr__new__.im_func), __index__ = interp2app(W_Int32Box.descr_index), __reduce__ = interp2app(W_Int32Box.descr_reduce), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt32Box.typedef = TypeDef("numpy.uint32", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt32Box.descr__new__.im_func), __index__ = interp2app(W_UInt32Box.descr_index), __reduce__ = interp2app(W_UInt32Box.descr_reduce), ) -W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpy", +W_Int64Box.typedef = TypeDef("numpy.int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __new__ = interp2app(W_Int64Box.descr__new__.im_func), __index__ = interp2app(W_Int64Box.descr_index), __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt64Box.typedef = TypeDef("numpy.uint64", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt64Box.descr__new__.im_func), __index__ = interp2app(W_UInt64Box.descr_index), __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) -W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, +W_LongBox.typedef = TypeDef("numpy.int%d" % LONG_BIT, 
(W_SignedIntegerBox.typedef, W_IntObject.typedef), - __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), __reduce__ = interp2app(W_LongBox.descr_reduce), ) -W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_ULongBox.typedef = TypeDef("numpy.uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_ULongBox.descr__new__.im_func), __index__ = interp2app(W_ULongBox.descr_index), __reduce__ = interp2app(W_ULongBox.descr_reduce), ) -W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpy", +W_InexactBox.typedef = TypeDef("numpy.inexact", W_NumberBox.typedef, ) -W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpy", +W_FloatingBox.typedef = TypeDef("numpy.floating", W_InexactBox.typedef, ) -W_Float16Box.typedef = TypeDef("float16", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float16Box.typedef = TypeDef("numpy.float16", W_FloatingBox.typedef, __new__ = interp2app(W_Float16Box.descr__new__.im_func), __reduce__ = interp2app(W_Float16Box.descr_reduce), ) -W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float32Box.typedef = TypeDef("numpy.float32", W_FloatingBox.typedef, __new__ = interp2app(W_Float32Box.descr__new__.im_func), __reduce__ = interp2app(W_Float32Box.descr_reduce), ) -W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - __module__ = "numpy", +W_Float64Box.typedef = TypeDef("numpy.float64", (W_FloatingBox.typedef, float_typedef), __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) -W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpy", +W_ComplexFloatingBox.typedef = TypeDef("numpy.complexfloating", 
W_InexactBox.typedef, ) -W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), - __module__ = "numpy", +W_Complex64Box.typedef = TypeDef("numpy.complex64", (W_ComplexFloatingBox.typedef), __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", +W_Complex128Box.typedef = TypeDef("numpy.complex128", (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), ) if long_double_size in (8, 12, 16): - W_FloatLongBox.typedef = TypeDef("float%d" % (long_double_size * 8), (W_FloatingBox.typedef), - __module__ = "numpy", + W_FloatLongBox.typedef = TypeDef("numpy.float%d" % (long_double_size * 8), (W_FloatingBox.typedef), __new__ = interp2app(W_FloatLongBox.descr__new__.im_func), __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) - W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", + W_ComplexLongBox.typedef = TypeDef("numpy.complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, - __module__ = "numpy", +W_FlexibleBox.typedef = TypeDef("numpy.flexible", W_GenericBox.typedef, ) -W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, - __module__ = "numpy", +W_VoidBox.typedef = TypeDef("numpy.void", W_FlexibleBox.typedef, __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = 
interp2app(W_VoidBox.descr_setitem), ) -W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, - __module__ = "numpy", +W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, ) -W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), - __module__ = "numpy", +W_StringBox.typedef = TypeDef("numpy.string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), - __module__ = "numpy", +W_UnicodeBox.typedef = TypeDef("numpy.unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -132,6 +132,8 @@ def get_name(self): name = self.w_box_type.name + if name.startswith('numpy.'): + name = name[6:] if name.endswith('_'): name = name[:-1] return name @@ -557,8 +559,7 @@ raise oefmt(space.w_TypeError, "data type not understood") -W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", +W_Dtype.typedef = TypeDef("numpy.dtype", __new__ = interp2app(descr__new__), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -62,8 +62,7 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) -W_FlagsObject.typedef = TypeDef("flagsobj", - __module__ = "numpy", +W_FlagsObject.typedef = TypeDef("numpy.flagsobj", __new__ = interp2app(W_FlagsObject.descr__new__.im_func), __getitem__ = interp2app(W_FlagsObject.descr_getitem), diff 
--git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1318,8 +1318,7 @@ return result """, filename=__file__).interphook('searchsort') -W_NDimArray.typedef = TypeDef("ndarray", - __module__ = "numpy", +W_NDimArray.typedef = TypeDef("numpy.ndarray", __new__ = interp2app(descr_new_array), __len__ = interp2app(W_NDimArray.descr_len), @@ -1486,8 +1485,7 @@ return descr_new_array(space, w_subtype, w_shape, w_dtype) -W_FlatIterator.typedef = TypeDef("flatiter", - __module__ = "numpy", +W_FlatIterator.typedef = TypeDef("numpy.flatiter", __iter__ = interp2app(W_FlatIterator.descr_iter), __getitem__ = interp2app(W_FlatIterator.descr_getitem), __setitem__ = interp2app(W_FlatIterator.descr_setitem), diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -497,8 +497,7 @@ return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef('nditer', - __module__ = 'numpy', +W_NDIter.typedef = TypeDef('numpy.nditer', __new__ = interp2app(descr__new__), __iter__ = interp2app(W_NDIter.descr_iter), diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -457,9 +457,7 @@ res_dtype, w_lhs, w_rhs, out) -W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", - +W_Ufunc.typedef = TypeDef("numpy.ufunc", __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), __name__ = GetSetProperty(W_Ufunc.descr_get_name), From noreply at buildbot.pypy.org Fri May 2 08:03:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:08 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: remove some more __module__s Message-ID: 
<20140502060308.ACE401C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71186:a6480afa6617 Date: 2014-05-02 01:13 -0400 http://bitbucket.org/pypy/pypy/changeset/a6480afa6617/ Log: remove some more __module__s diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -56,8 +56,7 @@ raise MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), next = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git a/pypy/module/_cffi_backend/ctypestruct.py 
b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(xrange(-5125, -5000)) d.__init__(xrange(200)) for i in xrange(200, 400): diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- 
a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -245,8 +245,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -844,10 +844,9 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -892,10 +891,9 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = 
interp2app(W_BufferedWriter.descr_init), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1015,10 +1013,9 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', W_TextIOBase.typedef, __new__ = generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1015,11 +1015,10 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), - __module__ = "_io", next = interp2app(W_TextIOWrapper.next_w), read = interp2app(W_TextIOWrapper.read_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -441,7 +441,6 @@ W_Profiler.typedef = TypeDef( '_lsprof.Profiler', - __module__ = '_lsprof', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), disable = interp2app(W_Profiler.disable), diff --git 
a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -353,9 +353,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -534,8 +533,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -207,9 +207,8 @@ return interp2app(descr_new_base_exception) W_BaseException.typedef = TypeDef( - 'BaseException', + 'exceptions.BaseException', __doc__ = W_BaseException.__doc__, - __module__ = 'exceptions', __new__ = _new(W_BaseException), __init__ = interp2app(W_BaseException.descr_init), __str__ = interp2app(W_BaseException.descr_str), @@ -311,10 +310,9 @@ """) W_UnicodeTranslateError.typedef = TypeDef( - 'UnicodeTranslateError', + 'exceptions.UnicodeTranslateError', W_UnicodeError.typedef, __doc__ = W_UnicodeTranslateError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeTranslateError), __init__ = interp2app(W_UnicodeTranslateError.descr_init), __str__ = interp2app(W_UnicodeTranslateError.descr_str), @@ -395,10 +393,9 @@ return W_BaseException.descr_str(self, space) W_EnvironmentError.typedef = TypeDef( - 'EnvironmentError', + 'exceptions.EnvironmentError', W_StandardError.typedef, 
__doc__ = W_EnvironmentError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_EnvironmentError), __reduce__ = interp2app(W_EnvironmentError.descr_reduce), __init__ = interp2app(W_EnvironmentError.descr_init), @@ -452,10 +449,9 @@ _winerror_to_errno, _default_errno = {}, 22 # EINVAL W_WindowsError.typedef = TypeDef( - "WindowsError", + "exceptions.WindowsError", W_OSError.typedef, __doc__ = W_WindowsError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_WindowsError), __init__ = interp2app(W_WindowsError.descr_init), __str__ = interp2app(W_WindowsError.descr_str), @@ -556,14 +552,13 @@ return W_StandardError.descr_repr(self, space) W_SyntaxError.typedef = TypeDef( - 'SyntaxError', + 'exceptions.SyntaxError', W_StandardError.typedef, __new__ = _new(W_SyntaxError), __init__ = interp2app(W_SyntaxError.descr_init), __str__ = interp2app(W_SyntaxError.descr_str), __repr__ = interp2app(W_SyntaxError.descr_repr), __doc__ = W_SyntaxError.__doc__, - __module__ = 'exceptions', msg = readwrite_attrproperty_w('w_msg', W_SyntaxError), filename = readwrite_attrproperty_w('w_filename', W_SyntaxError), lineno = readwrite_attrproperty_w('w_lineno', W_SyntaxError), @@ -592,12 +587,11 @@ W_BaseException.descr_init(self, space, args_w) W_SystemExit.typedef = TypeDef( - 'SystemExit', + 'exceptions.SystemExit', W_BaseException.typedef, __new__ = _new(W_SystemExit), __init__ = interp2app(W_SystemExit.descr_init), __doc__ = W_SystemExit.__doc__, - __module__ = 'exceptions', code = readwrite_attrproperty_w('w_code', W_SystemExit) ) @@ -657,10 +651,9 @@ """) W_UnicodeDecodeError.typedef = TypeDef( - 'UnicodeDecodeError', + 'exceptions.UnicodeDecodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeDecodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeDecodeError), __init__ = interp2app(W_UnicodeDecodeError.descr_init), __str__ = interp2app(W_UnicodeDecodeError.descr_str), @@ -752,10 +745,9 @@ """) W_UnicodeEncodeError.typedef = TypeDef( - 'UnicodeEncodeError', 
+ 'exceptions.UnicodeEncodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeEncodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeEncodeError), __init__ = interp2app(W_UnicodeEncodeError.descr_init), __str__ = interp2app(W_UnicodeEncodeError.descr_str), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -57,8 +57,7 @@ return space.wrap(r) W_Count.typedef = TypeDef( - 'count', - __module__ = 'itertools', + 'itertools.count', __new__ = interp2app(W_Count___new__), __iter__ = interp2app(W_Count.iter_w), next = interp2app(W_Count.next_w), @@ -120,8 +119,7 @@ return space.wrap(r) W_Repeat.typedef = TypeDef( - 'repeat', - __module__ = 'itertools', + 'itertools.repeat', __new__ = interp2app(W_Repeat___new__), __iter__ = interp2app(W_Repeat.iter_w), __length_hint__ = interp2app(W_Repeat.length_w), @@ -174,8 +172,7 @@ W_TakeWhile.typedef = TypeDef( - 'takewhile', - __module__ = 'itertools', + 'itertools.takewhile', __new__ = interp2app(W_TakeWhile___new__), __iter__ = interp2app(W_TakeWhile.iter_w), next = interp2app(W_TakeWhile.next_w), @@ -223,8 +220,7 @@ W_DropWhile.typedef = TypeDef( - 'dropwhile', - __module__ = 'itertools', + 'itertools.dropwhile', __new__ = interp2app(W_DropWhile___new__), __iter__ = interp2app(W_DropWhile.iter_w), next = interp2app(W_DropWhile.next_w), @@ -280,8 +276,7 @@ return space.wrap(r) W_IFilter.typedef = TypeDef( - 'ifilter', - __module__ = 'itertools', + 'itertools.ifilter', __new__ = interp2app(W_IFilter___new__), __iter__ = interp2app(W_IFilter.iter_w), next = interp2app(W_IFilter.next_w), @@ -308,8 +303,7 @@ return space.wrap(r) W_IFilterFalse.typedef = TypeDef( - 'ifilterfalse', - __module__ = 'itertools', + 'itertools.ifilterfalse', __new__ = interp2app(W_IFilterFalse___new__), __iter__ = interp2app(W_IFilterFalse.iter_w), next = interp2app(W_IFilterFalse.next_w), @@ 
-417,8 +411,7 @@ return space.wrap(r) W_ISlice.typedef = TypeDef( - 'islice', - __module__ = 'itertools', + 'itertools.islice', __new__ = interp2app(W_ISlice___new__), __iter__ = interp2app(W_ISlice.iter_w), next = interp2app(W_ISlice.next_w), @@ -482,8 +475,7 @@ return space.wrap(r) W_Chain.typedef = TypeDef( - 'chain', - __module__ = 'itertools', + 'itertools.chain', __new__ = interp2app(W_Chain___new__), __iter__ = interp2app(W_Chain.iter_w), next = interp2app(W_Chain.next_w), @@ -564,8 +556,7 @@ return space.wrap(r) W_IMap.typedef = TypeDef( - 'imap', - __module__ = 'itertools', + 'itertools.imap', __new__ = interp2app(W_IMap___new__), __iter__ = interp2app(W_IMap.iter_w), next = interp2app(W_IMap.next_w), @@ -609,8 +600,7 @@ return space.wrap(r) W_IZip.typedef = TypeDef( - 'izip', - __module__ = 'itertools', + 'itertools.izip', __new__ = interp2app(W_IZip___new__), __iter__ = interp2app(W_IZip.iter_w), next = interp2app(W_IZip.next_w), @@ -678,8 +668,7 @@ return space.wrap(self) W_IZipLongest.typedef = TypeDef( - 'izip_longest', - __module__ = 'itertools', + 'itertools.izip_longest', __new__ = interp2app(W_IZipLongest___new__), __iter__ = interp2app(W_IZipLongest.iter_w), next = interp2app(W_IZipLongest.next_w), @@ -737,8 +726,7 @@ return space.wrap(r) W_Cycle.typedef = TypeDef( - 'cycle', - __module__ = 'itertools', + 'itertools.cycle', __new__ = interp2app(W_Cycle___new__), __iter__ = interp2app(W_Cycle.iter_w), next = interp2app(W_Cycle.next_w), @@ -778,8 +766,7 @@ return space.wrap(r) W_StarMap.typedef = TypeDef( - 'starmap', - __module__ = 'itertools', + 'itertools.starmap', __new__ = interp2app(W_StarMap___new__), __iter__ = interp2app(W_StarMap.iter_w), next = interp2app(W_StarMap.next_w), @@ -879,8 +866,7 @@ myiter.chained_list)) W_TeeIterable.typedef = TypeDef( - '_tee', - __module__ = 'itertools', + 'itertools._tee', __new__ = interp2app(W_TeeIterable___new__), __iter__ = interp2app(W_TeeIterable.iter_w), next = interp2app(W_TeeIterable.next_w), @@ 
-983,8 +969,7 @@ return space.wrap(r) W_GroupBy.typedef = TypeDef( - 'groupby', - __module__ = 'itertools', + 'itertools.groupby', __new__ = interp2app(W_GroupBy___new__), __iter__ = interp2app(W_GroupBy.iter_w), next = interp2app(W_GroupBy.next_w), @@ -1031,8 +1016,7 @@ return w_obj W_GroupByIterator.typedef = TypeDef( - '_groupby', - __module__ = 'itertools', + 'itertools._groupby', __iter__ = interp2app(W_GroupByIterator.iter_w), next = interp2app(W_GroupByIterator.next_w)) W_GroupByIterator.typedef.acceptable_as_base_class = False @@ -1063,8 +1047,7 @@ return space.wrap(r) W_Compress.typedef = TypeDef( - 'compress', - __module__ = 'itertools', + 'itertools.compress', __new__ = interp2app(W_Compress__new__), __iter__ = interp2app(W_Compress.iter_w), next = interp2app(W_Compress.next_w), @@ -1159,8 +1142,7 @@ return space.wrap(r) W_Product.typedef = TypeDef( - 'product', - __module__ = 'itertools', + 'itertools.product', __new__ = interp2app(W_Product__new__), __iter__ = interp2app(W_Product.iter_w), next = interp2app(W_Product.next_w), @@ -1263,8 +1245,7 @@ res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_Combinations.typedef = TypeDef("combinations", - __module__ = 'itertools', +W_Combinations.typedef = TypeDef("itertools.combinations", __new__ = interp2app(W_Combinations__new__), __iter__ = interp2app(W_Combinations.descr__iter__), next = interp2app(W_Combinations.descr_next), @@ -1298,8 +1279,8 @@ res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_CombinationsWithReplacement.typedef = TypeDef("combinations_with_replacement", - __module__ = 'itertools', +W_CombinationsWithReplacement.typedef = TypeDef( + "itertools.combinations_with_replacement", __new__ = interp2app(W_CombinationsWithReplacement__new__), __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), next = interp2app(W_CombinationsWithReplacement.descr_next), @@ -1364,8 +1345,7 @@ res.__init__(space, pool_w, r) return space.wrap(res) 
-W_Permutations.typedef = TypeDef("permutations", - __module__ = 'itertools', +W_Permutations.typedef = TypeDef("itertools.permutations", __new__ = interp2app(W_Permutations__new__), __iter__ = interp2app(W_Permutations.descr__iter__), next = interp2app(W_Permutations.descr_next), diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -235,7 +235,7 @@ raise mmap_error(space, e) return space.wrap(self) -W_MMap.typedef = TypeDef("mmap", +W_MMap.typedef = TypeDef("mmap.mmap", __new__ = interp2app(mmap), close = interp2app(W_MMap.close), read_byte = interp2app(W_MMap.read_byte), @@ -251,7 +251,6 @@ flush = interp2app(W_MMap.flush), move = interp2app(W_MMap.move), resize = interp2app(W_MMap.resize), - __module__ = "mmap", __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), From noreply at buildbot.pypy.org Fri May 2 08:03:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:03:09 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix object repr Message-ID: <20140502060309.CEDCB1C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71187:99e42837c465 Date: 2014-05-02 01:46 -0400 http://bitbucket.org/pypy/pypy/changeset/99e42837c465/ Log: fix object repr diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -6,16 +6,17 @@ def descr__repr__(space, w_obj): w_type = space.type(w_obj) - classname = w_type.getname(space) - w_module = w_type.lookup("__module__") - if w_module is not None: - try: - modulename = space.str_w(w_module) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - classname = '%s.%s' % (modulename, classname) + classname = w_type.name + if w_type.is_heaptype(): + w_module = w_type.lookup("__module__") + if w_module is not 
None: + try: + modulename = space.str_w(w_module) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + classname = '%s.%s' % (modulename, classname) return w_obj.getrepr(space, '%s object' % (classname,)) def descr__str__(space, w_obj): From noreply at buildbot.pypy.org Fri May 2 08:29:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 08:29:15 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew for release Message-ID: <20140502062915.154311C088E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71188:f49a12bcf186 Date: 2014-05-02 09:28 +0300 http://bitbucket.org/pypy/pypy/changeset/f49a12bcf186/ Log: update whatsnew for release diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -161,3 +161,7 @@ .. branch: refactor-buffer-api Properly implement old/new buffer API for objects and start work on replacing bufferstr usage + +.. branch: issue1430 +Add a lock for unsafe calls to gethostbyname and gethostbyaddr + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0524dae88c75 +.. startrev: 0f75ad4d14ce -.. branch: reflex-support - -.. 
branch: issue1430 From noreply at buildbot.pypy.org Fri May 2 08:29:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 08:29:17 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into release Message-ID: <20140502062917.BFE211C088E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71189:6a851784bf2f Date: 2014-05-02 09:28 +0300 http://bitbucket.org/pypy/pypy/changeset/6a851784bf2f/ Log: merge default into release diff too long, truncating to 2000 out of 3386 lines diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = types.ModuleType('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -161,3 +161,7 @@ .. branch: refactor-buffer-api Properly implement old/new buffer API for objects and start work on replacing bufferstr usage + +.. 
branch: issue1430 +Add a lock for unsafe calls to gethostbyname and gethostbyaddr + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0524dae88c75 +.. startrev: 0f75ad4d14ce - diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -692,23 +692,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. # In this special situation we should just not lock at all # (translation is not multithreaded anyway). 
if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1415,10 +1409,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): @@ -1722,24 +1716,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -362,9 +362,9 @@ value = getattr(self, attr) if fmt == 'R': result = space.str_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': result = value.getname(space) else: result = str(value) @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -27,6 +27,7 @@ consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) + if not dont_inherit: caller = ec.gettopframe_nohidden() if caller: @@ -37,8 +38,7 @@ space.wrap("compile() arg 3 must be 'exec' " "or 'eval' or 'single'")) - w_ast_type = space.gettypeobject(ast.AST.typedef) - if space.isinstance_w(w_source, w_ast_type): + if space.isinstance_w(w_source, space.gettypeobject(ast.AST.typedef)): ast_node = space.interp_w(ast.mod, w_source) ast_node.sync_app_attrs(space) code = ec.compiler.compile_ast(ast_node, filename, mode, flags) @@ -47,20 +47,20 @@ if space.isinstance_w(w_source, space.w_unicode): w_utf_8_source = space.call_method(w_source, "encode", space.wrap("utf-8")) - str_ = space.str_w(w_utf_8_source) + source = space.str_w(w_utf_8_source) # This flag tells the parser to reject any coding cookies it sees. 
flags |= consts.PyCF_SOURCE_IS_UTF8 else: - str_ = space.readbuf_w(w_source).as_str() + source = space.readbuf_w(w_source).as_str() - if '\x00' in str_: + if '\x00' in source: raise OperationError(space.w_TypeError, space.wrap( "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: - code = ec.compiler.compile_to_ast(str_, filename, mode, flags) + code = ec.compiler.compile_to_ast(source, filename, mode, flags) else: - code = ec.compiler.compile(str_, filename, mode, flags) + code = ec.compiler.compile(source, filename, mode, flags) return space.wrap(code) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -19,6 +19,11 @@ ast = self.ast assert isinstance(ast.__version__, str) + def test_flags(self): + skip("broken") + from copy_reg import _HEAPTYPE + assert self.ast.Module.__flags__ & _HEAPTYPE + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") @@ -218,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert exc.value.args[0] == "'Num' object has no attribute 'n'" + assert "Num' object has no attribute 'n'" in exc.value.args[0] x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert exc.value.args[0] == "'Num' object has no attribute 'lineno'" + assert "Num' object has no attribute 'lineno'" in exc.value.args[0] y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert exc.value.args[0] == "'Num' object has no attribute 'foobar'" + assert "Num' object has no attribute 'foobar'" in exc.value.args[0] x = ast.Num(lineno=2) assert x.lineno == 2 @@ -244,9 +249,8 @@ raises(TypeError, ast.Num, 1, 2, lineno=0) def test_issue1680_nonseq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert 
isinstance(mod, _ast.Module) @@ -287,9 +291,8 @@ assert not hasattr(mod.body[0], 'name') def test_issue1680_seq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -392,9 +395,8 @@ import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ - assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Num_notfullinit(self): import ast import copy @@ -402,7 +404,7 @@ assert num_node.n == 2 assert num_node.lineno == 2 num_node2 = copy.deepcopy(num_node) - + def test_issue1673_Num_fullinit(self): import ast import copy @@ -413,7 +415,7 @@ assert num_node.col_offset == num_node2.col_offset dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Str(self): import ast import copy @@ -423,4 +425,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - \ No newline at end of file diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -59,7 +59,7 @@ self.tt, self.it, calls_repr)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsEntry.typedef = TypeDef( 'StatsEntry', @@ -86,7 +86,7 @@ frame_repr, self.callcount, self.reccallcount, self.tt, self.it)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsSubEntry.typedef = TypeDef( 'SubStatsEntry', @@ -189,50 +189,82 @@ subentry._stop(tt, it) - at jit.elidable_promote() def create_spec_for_method(space, w_function, w_type): - w_function = w_function + class_name = None if isinstance(w_function, Function): name = w_function.name + # try to get the real class that defines the method, + # which is a superclass of the class of the instance + from 
pypy.objspace.std.typeobject import W_TypeObject # xxx + if isinstance(w_type, W_TypeObject): + w_realclass, _ = space.lookup_in_type_where(w_type, name) + if isinstance(w_realclass, W_TypeObject): + class_name = w_realclass.get_module_type_name() else: name = '?' - # try to get the real class that defines the method, - # which is a superclass of the class of the instance - from pypy.objspace.std.typeobject import W_TypeObject # xxx - class_name = w_type.getname(space) # if the rest doesn't work - if isinstance(w_type, W_TypeObject) and name != '?': - w_realclass, _ = space.lookup_in_type_where(w_type, name) - if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() - return "{method '%s' of '%s' objects}" % (name, class_name) + if class_name is None: + class_name = w_type.getname(space) # if the rest doesn't work + return "" % (name, class_name) - at jit.elidable_promote() def create_spec_for_function(space, w_func): - if w_func.w_module is None: - module = '' + assert isinstance(w_func, Function) + if w_func.w_module is not None: + module = space.str_w(w_func.w_module) + if module != '__builtin__': + return '<%s.%s>' % (module, w_func.name) + return '<%s>' % w_func.name + + +def create_spec_for_object(space, w_type): + class_name = w_type.getname(space) + return "<'%s' object>" % (class_name,) + + +class W_DelayedBuiltinStr(W_Root): + # This class should not be seen at app-level, but is useful to + # contain a (w_func, w_type) pair returned by prepare_spec(). + # Turning this pair into a string cannot be done eagerly in + # an @elidable function because of space.str_w(), but it can + # be done lazily when we really want it. 
+ + _immutable_fields_ = ['w_func', 'w_type'] + + def __init__(self, w_func, w_type): + self.w_func = w_func + self.w_type = w_type + self.w_string = None + + def wrap_string(self, space): + if self.w_string is None: + if self.w_type is None: + s = create_spec_for_function(space, self.w_func) + elif self.w_func is None: + s = create_spec_for_object(space, self.w_type) + else: + s = create_spec_for_method(space, self.w_func, self.w_type) + self.w_string = space.wrap(s) + return self.w_string + +W_DelayedBuiltinStr.typedef = TypeDef( + 'DelayedBuiltinStr', + __str__ = interp2app(W_DelayedBuiltinStr.wrap_string), +) + +def returns_code(space, w_frame): + if isinstance(w_frame, W_DelayedBuiltinStr): + return w_frame.wrap_string(space) + return w_frame # actually a PyCode object + + +def prepare_spec(space, w_arg): + if isinstance(w_arg, Method): + return (w_arg.w_function, w_arg.w_class) + elif isinstance(w_arg, Function): + return (w_arg, None) else: - module = space.str_w(w_func.w_module) - if module == '__builtin__': - module = '' - else: - module += '.' 
- return '{%s%s}' % (module, w_func.name) - - - at jit.elidable_promote() -def create_spec_for_object(space, w_obj): - class_name = space.type(w_obj).getname(space) - return "{'%s' object}" % (class_name,) - - -def create_spec(space, w_arg): - if isinstance(w_arg, Method): - return create_spec_for_method(space, w_arg.w_function, w_arg.w_class) - elif isinstance(w_arg, Function): - return create_spec_for_function(space, w_arg) - else: - return create_spec_for_object(space, w_arg) + return (None, space.type(w_arg)) +prepare_spec._always_inline_ = True def lsprof_call(space, w_self, frame, event, w_arg): @@ -245,12 +277,10 @@ w_self._enter_return(code) elif event == 'c_call': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_call(key) + w_self._enter_builtin_call(w_arg) elif event == 'c_return' or event == 'c_exception': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_return(key) + w_self._enter_builtin_return(w_arg) else: # ignore or raise an exception??? 
pass @@ -313,13 +343,14 @@ return entry raise - @jit.elidable - def _get_or_make_builtin_entry(self, key, make=True): + @jit.elidable_promote() + def _get_or_make_builtin_entry(self, w_func, w_type, make): + key = (w_func, w_type) try: return self.builtin_data[key] except KeyError: if make: - entry = ProfilerEntry(self.space.wrap(key)) + entry = ProfilerEntry(W_DelayedBuiltinStr(w_func, w_type)) self.builtin_data[key] = entry return entry raise @@ -343,18 +374,18 @@ context._stop(self, entry) self.current_context = context.previous - def _enter_builtin_call(self, key): - self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key) + def _enter_builtin_call(self, w_arg): + w_func, w_type = prepare_spec(self.space, w_arg) + entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) - def _enter_builtin_return(self, key): + def _enter_builtin_return(self, w_arg): context = self.current_context if context is None: return - self = jit.promote(self) + w_func, w_type = prepare_spec(self.space, w_arg) try: - entry = self._get_or_make_builtin_entry(key, False) + entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: pass else: diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -11,6 +11,48 @@ import _lsprof assert repr(_lsprof.Profiler) == "" + def test_builtins(self): + import _lsprof + prof = _lsprof.Profiler() + lst = [] + prof.enable() + lst.append(len(lst)) + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + "", + ) + for entry in stats: + assert entry.code in expected + + def test_builtins_callers(self): + import _lsprof + prof = _lsprof.Profiler(subcalls=True) + lst = [] + def f1(): + lst.append(len(lst)) + prof.enable(subcalls=True) + f1() + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + ) 
+ by_id = set() + for entry in stats: + if entry.code == f1.__code__: + assert len(entry.calls) == 2 + for subentry in entry.calls: + assert subentry.code in expected + by_id.add(id(subentry.code)) + elif entry.code in expected: + by_id.add(id(entry.code)) + # :-( cProfile.py relies on the id() of the strings... + assert len(by_id) == len(expected) + def test_direct(self): import _lsprof def getticks(): @@ -37,10 +79,8 @@ stats = prof.getstats() entries = {} for entry in stats: - if not hasattr(entry.code, 'co_name'): - print entry.code - else: - entries[entry.code.co_name] = entry + assert hasattr(entry.code, 'co_name') + entries[entry.code.co_name] = entry efoo = entries['foo'] assert efoo.callcount == 2 assert efoo.reccallcount == 1 @@ -104,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -179,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print() + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - 
assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,6 +17,8 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() + from pypy.module._socket.interp_func import State + space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -42,8 +42,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host) + res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -55,8 +56,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
""" + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host) + res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -310,3 +312,10 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) + +class State(object): + def __init__(self, space): + self.netdb_lock = None + + def startup(self, space): + self.netdb_lock = space.allocate_lock() diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,7 +600,8 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_RSocket.typedef = TypeDef("socket", + __module__ = "_socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -313,6 +313,11 @@ cls.space = space cls.w_udir = space.wrap(str(udir)) + def test_module(self): + import _socket + assert _socket.socket.__name__ == 'socket' + assert _socket.socket.__module__ == '_socket' + def test_ntoa_exception(self): import _socket raises(_socket.error, _socket.inet_ntoa, "ab") diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -16,7 +16,7 @@ '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', - 'CPPInstance' : 'interp_cppyy.W_CPPInstance', + 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', } @@ -25,7 +25,7 @@ 
'_init_pythonify' : 'pythonify._init_pythonify', 'load_reflection_info' : 'pythonify.load_reflection_info', 'add_pythonization' : 'pythonify.add_pythonization', - 'Template' : 'pythonify.CppyyTemplateType', + 'Template' : 'pythonify.CPPTemplate', } def __init__(self, space, *args): diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -127,19 +127,18 @@ argc = len(args_w) try: - # Note: argcount is +1 for the class (== w_self) - if argc < 5 or 6 < argc: + if argc < 4 or 5 < argc: raise TypeError("wrong number of arguments") - # second argument must be a name - funcname = space.str_w(args_w[1]) + # first argument must be a name + funcname = space.str_w(args_w[0]) # last (optional) argument is number of parameters npar = 0 - if argc == 6: npar = space.int_w(args_w[5]) + if argc == 5: npar = space.int_w(args_w[4]) - # third argument must be a callable python object - w_callable = args_w[2] + # second argument must be a callable python object + w_callable = args_w[1] if not space.is_true(space.callable(w_callable)): raise TypeError("2nd argument is not a valid python callable") @@ -159,17 +158,21 @@ # so far, so good; leaves on issue: CINT is expecting a wrapper, but # we need the overload that takes a function pointer, which is not in # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[1]), funcaddr, - space.float_w(args_w[3]), space.float_w(args_w[4]), npar) - - from pypy.module.cppyy import interp_cppyy - w_instance = interp_cppyy.wrap_cppobject(space, newinst, tf1_class, - do_cast=False, python_owns=True, fresh=True) + newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, + space.float_w(args_w[2]), space.float_w(args_w[3]), npar) + + # w_self is a null-ptr bound as TF1 + from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator + cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) + 
cppself._rawobject = newinst + memory_regulator.register(cppself) # tie all the life times to the TF1 instance - space.setattr(w_instance, space.wrap('_callback'), w_callback) + space.setattr(w_self, space.wrap('_callback'), w_callback) - return w_instance + # by definition for __init__ + return None + except (OperationError, TypeError, IndexError), e: newargs_w = args_w[1:] # drop class @@ -312,7 +315,7 @@ # location w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.buffer_w(w_address) + buf = space.getarg_w('s*', w_address) from pypy.module._rawffi import buffer assert isinstance(buf, buffer.RawFFIBuffer) address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) @@ -395,7 +398,7 @@ _method_alias(space, w_pycppclass, "__len__", "GetSize") elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__new__"), _pythonizations["tf1_tf1"]) + space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) elif name == "TFile": _method_alias(space, w_pycppclass, "__getattr__", "Get") diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -155,18 +155,16 @@ the memory_regulator.""" _attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required', - 'args_expected', 'converters', 'executor', '_funcaddr', 'cif_descr', - 'uses_local'] + 'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local'] _immutable_ = True - def __init__(self, space, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, declaring_scope, method_index, arg_defs, args_required): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.index = method_index self.cppmethod = capi.c_get_method(self.space, self.scope, method_index) self.arg_defs = arg_defs self.args_required = args_required - self.args_expected = len(arg_defs) # Setup of the method dispatch's innards is done 
lazily, i.e. only when # the method is actually used. @@ -176,6 +174,12 @@ self._funcaddr = lltype.nullptr(rffi.VOIDP.TO) self.uses_local = False + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) + cppinstance._nullcheck() + return cppinstance.get_cppthis(declaring_scope) + def _address_from_local_buffer(self, call_local, idx): if not call_local: return call_local @@ -277,7 +281,7 @@ funcaddr = methgetter(rffi.cast(capi.C_OBJECT, cppthis)) self._funcaddr = rffi.cast(rffi.VOIDP, funcaddr) - nargs = self.args_expected + 1 # +1: cppthis + nargs = len(self.arg_defs) + 1 # +1: cppthis # memory block for CIF description (note: not tracked as the life # time of methods is normally the duration of the application) @@ -335,7 +339,7 @@ # extra cif_descr.abi = clibffi.FFI_DEFAULT_ABI - cif_descr.nargs = self.args_expected + 1 # +1: cppthis + cif_descr.nargs = len(self.arg_defs) + 1 # +1: cppthis res = jit_libffi.jit_ffi_prep_cif(cif_descr) if res != clibffi.FFI_OK: @@ -405,28 +409,29 @@ class CPPFunction(CPPMethod): - """Global (namespaced) function dispatcher. For now, the base class has - all the needed functionality, by allowing the C++ this pointer to be null - in the call. An optimization is expected there, however.""" + """Global (namespaced) function dispatcher.""" _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return capi.C_NULL_OBJECT + def __repr__(self): return "CPPFunction: %s" % self.signature() class CPPTemplatedCall(CPPMethod): - """Method dispatcher that first needs to resolve the template instance. 
- Note that the derivation is from object: the CPPMethod is a data member.""" + """Method dispatcher that first resolves the template instance.""" - _attrs_ = ['space', 'templ_args', 'method'] + _attrs_ = ['space', 'templ_args'] _immutable_ = True - def __init__(self, space, templ_args, containing_scope, method_index, arg_defs, args_required): + def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required): self.space = space self.templ_args = templ_args # TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here - CPPMethod.__init__(self, space, containing_scope, method_index, arg_defs, args_required) + CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required) def call(self, cppthis, args_w): assert lltype.typeOf(cppthis) == capi.C_OBJECT @@ -456,24 +461,15 @@ _immutable_ = True + @staticmethod + def unpack_cppthis(space, w_cppinstance, declaring_scope): + return rffi.cast(capi.C_OBJECT, declaring_scope.handle) + def call(self, cppthis, args_w): - # TODO: these casts are very, very un-pretty; need to find a way of - # re-using CPPMethod's features w/o these roundabouts - vscope = rffi.cast(capi.C_OBJECT, self.scope.handle) - cppinstance = None - try: - cppinstance = self.space.interp_w(W_CPPInstance, args_w[0], can_be_None=False) - use_args_w = args_w[1:] - except (OperationError, TypeError), e: - use_args_w = args_w - w_result = CPPMethod.call(self, vscope, use_args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) - if cppinstance: - cppinstance._rawobject = newthis - memory_regulator.register(cppinstance) - return args_w[0] - return wrap_cppobject(self.space, newthis, self.scope, - do_cast=False, python_owns=True, fresh=True) + # Note: this does not return a wrapped instance, just a pointer to the + # new instance; the overload must still wrap it before returning. Also, + # cppthis is declaring_scope.handle (as per unpack_cppthis(), above). 
+ return CPPMethod.call(self, cppthis, args_w) def __repr__(self): return "CPPConstructor: %s" % self.signature() @@ -505,9 +501,10 @@ _attrs_ = ['space', 'scope', 'functions'] _immutable_fields_ = ['scope', 'functions[*]'] - def __init__(self, space, containing_scope, functions): + def __init__(self, space, declaring_scope, functions): self.space = space - self.scope = containing_scope + self.scope = declaring_scope + assert len(functions) from rpython.rlib import debug self.functions = debug.make_sure_not_resized(functions) @@ -520,12 +517,10 @@ @jit.unroll_safe @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): - cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) - if cppinstance is not None: - cppinstance._nullcheck() - cppthis = cppinstance.get_cppthis(self.scope) - else: - cppthis = capi.C_NULL_OBJECT + # instance handling is specific to the function type only, so take it out + # of the loop over function overloads + cppthis = self.functions[0].unpack_cppthis( + self.space, w_cppinstance, self.functions[0].scope) assert lltype.typeOf(cppthis) == capi.C_OBJECT # The following code tries out each of the functions in order. 
If @@ -585,6 +580,39 @@ ) +class W_CPPConstructorOverload(W_CPPOverload): + @jit.elidable_promote() + def is_static(self): + return self.space.w_False + + @jit.elidable_promote() + def unpack_cppthis(self, w_cppinstance): + return rffi.cast(capi.C_OBJECT, self.scope.handle) + + @jit.unroll_safe + @unwrap_spec(args_w='args_w') + def call(self, w_cppinstance, args_w): + w_result = W_CPPOverload.call(self, w_cppinstance, args_w) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) + if cppinstance is not None: + cppinstance._rawobject = newthis + memory_regulator.register(cppinstance) + return w_cppinstance + return wrap_cppobject(self.space, newthis, self.functions[0].scope, + do_cast=False, python_owns=True, fresh=True) + + def __repr__(self): + return "W_CPPConstructorOverload(%s)" % [f.signature() for f in self.functions] + +W_CPPConstructorOverload.typedef = TypeDef( + 'CPPConstructorOverload', + is_static = interp2app(W_CPPConstructorOverload.is_static), + call = interp2app(W_CPPConstructorOverload.call), + signature = interp2app(W_CPPOverload.signature), +) + + class W_CPPBoundMethod(W_Root): _attrs_ = ['cppthis', 'method'] @@ -605,9 +633,9 @@ _attrs_ = ['space', 'scope', 'converter', 'offset'] _immutable_fields = ['scope', 'converter', 'offset'] - def __init__(self, space, containing_scope, type_name, offset): + def __init__(self, space, declaring_scope, type_name, offset): self.space = space - self.scope = containing_scope + self.scope = declaring_scope self.converter = converter.get_converter(self.space, type_name, '') self.offset = offset @@ -717,7 +745,10 @@ # create the overload methods from the method sets for pyname, methods in methods_temp.iteritems(): CPPMethodSort(methods).sort() - overload = W_CPPOverload(self.space, self, methods[:]) + if pyname == self.name: + overload = W_CPPConstructorOverload(self.space, self, methods[:]) + else: + overload = 
W_CPPOverload(self.space, self, methods[:]) self.methods[pyname] = overload def full_name(self): @@ -857,14 +888,13 @@ class W_CPPClass(W_CPPScope): - _attrs_ = ['space', 'default_constructor', 'name', 'handle', 'methods', 'datamembers'] - _immutable_fields_ = ['kind', 'default_constructor', 'methods[*]', 'datamembers[*]'] + _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] + _immutable_fields_ = ['kind', 'constructor', 'methods[*]', 'datamembers[*]'] kind = "class" def __init__(self, space, name, opaque_handle): W_CPPScope.__init__(self, space, name, opaque_handle) - self.default_constructor = None def _make_cppfunction(self, pyname, index): num_args = capi.c_method_num_args(self.space, self, index) @@ -876,8 +906,6 @@ arg_defs.append((arg_type, arg_dflt)) if capi.c_is_constructor(self.space, self, index): cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required) - if args_required == 0: - self.default_constructor = cppfunction elif capi.c_method_is_template(self.space, self, index): templ_args = capi.c_template_args(self.space, self, index) cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required) @@ -905,9 +933,7 @@ self.datamembers[datamember_name] = datamember def construct(self): - if self.default_constructor is not None: - return self.default_constructor.call(capi.C_NULL_OBJECT, []) - raise self.missing_attribute_error("default_constructor") + return self.get_overload(self.name).call(None, []) def find_overload(self, name): raise self.missing_attribute_error(name) @@ -1046,6 +1072,16 @@ raise return None + def instance__init__(self, args_w): + try: + constructor_overload = self.cppclass.get_overload(self.cppclass.name) + constructor_overload.call(self, args_w) + except OperationError, e: + if not e.match(self.space, self.space.w_AttributeError): + raise + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot instantiate abstract class '%s'" % self.cppclass.name)) + def 
instance__eq__(self, w_other): # special case: if other is None, compare pointer-style if self.space.is_w(w_other, self.space.w_None): @@ -1128,6 +1164,7 @@ 'CPPInstance', cppclass = interp_attrproperty('cppclass', cls=W_CPPInstance), _python_owns = GetSetProperty(W_CPPInstance.fget_python_owns, W_CPPInstance.fset_python_owns), + __init__ = interp2app(W_CPPInstance.instance__init__), __eq__ = interp2app(W_CPPInstance.instance__eq__), __ne__ = interp2app(W_CPPInstance.instance__ne__), __nonzero__ = interp2app(W_CPPInstance.instance__nonzero__), diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,30 +1,31 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible # with info from multiple dictionaries and do not need to bother with meta # classes for inheritance. Both are python classes, though, and refactoring # may be in order at some point. 
-class CppyyScopeMeta(type): +class CPPScope(type): def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) -class CppyyNamespaceMeta(CppyyScopeMeta): +class CPPNamespace(CPPScope): def __dir__(cls): return cls._cpp_proxy.__dir__() -class CppyyClassMeta(CppyyScopeMeta): +class CPPClass(CPPScope): pass -# class CppyyClass defined in _init_pythonify() +# class CPPInstance defined in _init_pythonify() -class CppyyTemplateType(object): +class CPPTemplate(object): def __init__(self, name, scope=None): self._name = name if scope is None: @@ -91,7 +92,7 @@ # build up a representation of a C++ namespace (namespaces are classes) # create a meta class to allow properties (for static data write access) - metans = type(CppyyNamespaceMeta)(namespace_name+'_meta', (CppyyNamespaceMeta,), {}) + metans = type(CPPNamespace)(namespace_name+'_meta', (CPPNamespace,), {}) if cppns: d = {"_cpp_proxy" : cppns} @@ -137,21 +138,14 @@ break return tuple(bases) -def make_new(class_name, cppclass): - try: - constructor_overload = cppclass.get_overload(cppclass.type_name) - except AttributeError: - msg = "cannot instantiate abstract class '%s'" % class_name - def __new__(cls, *args): - raise TypeError(msg) - else: - def __new__(cls, *args): - # create a place-holder only as there may be a derived class defined - import cppyy - instance = cppyy.bind_object(0, class_name, True) - if not instance.__class__ is cls: - instance.__class__ = cls # happens for derived class - return instance +def make_new(class_name): + def __new__(cls, *args): + # create a place-holder only as there may be a derived class defined + import cppyy + instance = cppyy.bind_object(0, class_name, True) + if not instance.__class__ is cls: + instance.__class__ = cls # happens for derived class + return instance return __new__ def make_pycppclass(scope, 
class_name, final_class_name, cppclass): @@ -159,7 +153,7 @@ # get a list of base classes for class creation bases = [get_pycppclass(base) for base in cppclass.get_base_names()] if not bases: - bases = [CppyyClass,] + bases = [CPPInstance,] else: # it's technically possible that the required class now has been built # if one of the base classes uses it in e.g. a function interface @@ -170,7 +164,7 @@ # create a meta class to allow properties (for static data write access) metabases = [type(base) for base in bases] - metacpp = type(CppyyClassMeta)(class_name+'_meta', _drop_cycles(metabases), {}) + metacpp = type(CPPClass)(class_name+'_meta', _drop_cycles(metabases), {}) # create the python-side C++ class representation def dispatch(self, name, signature): @@ -178,7 +172,7 @@ return types.MethodType(make_method(name, cppol), self, type(self)) d = {"_cpp_proxy" : cppclass, "__dispatch__" : dispatch, - "__new__" : make_new(class_name, cppclass), + "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) @@ -214,7 +208,7 @@ return pycppclass def make_cpptemplatetype(scope, template_name): - return CppyyTemplateType(template_name, scope) + return CPPTemplate(template_name, scope) def get_pycppitem(scope, name): @@ -309,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] @@ -426,15 +420,12 @@ # at pypy-c startup, rather than on the "import cppyy" statement import cppyy - # top-level classes - global CppyyClass - class CppyyClass(cppyy.CPPInstance): - __metaclass__ = CppyyClassMeta - - def __init__(self, *args, **kwds): - # self is only a placeholder; now create the actual C++ object - args = (self,) + args - self._cpp_proxy.get_overload(self._cpp_proxy.type_name).call(None, *args) + # root of 
all proxy classes: CPPInstance in pythonify exists to combine the + # CPPClass meta class with the interp-level CPPInstanceBase + global CPPInstance + class CPPInstance(cppyy.CPPInstanceBase): + __metaclass__ = CPPClass + pass # class generator callback cppyy._set_class_generator(clgen_callback) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -4,17 +4,22 @@ #include #include #include +#include #include #include +#include #include #include +#pragma GCC diagnostic ignored "-Winvalid-offsetof" + // add example01.cxx code int globalAddOneToInt(int a); namespace dummy { #include "example01.cxx" +#include "datatypes.cxx" } int globalAddOneToInt(int a) { @@ -27,168 +32,307 @@ typedef std::map Handles_t; static Handles_t s_handles; +enum EMethodType { kNormal=0, kConstructor=1, kStatic=2 }; + struct Cppyy_PseudoMethodInfo { Cppyy_PseudoMethodInfo(const std::string& name, const std::vector& argtypes, - const std::string& returntype) : - m_name(name), m_argtypes(argtypes), m_returntype(returntype) {} + const std::string& returntype, + EMethodType mtype = kNormal) : + m_name(name), m_argtypes(argtypes), m_returntype(returntype), m_type(mtype) {} std::string m_name; std::vector m_argtypes; std::string m_returntype; + EMethodType m_type; +}; + +struct Cppyy_PseudoDatambrInfo { + Cppyy_PseudoDatambrInfo(const std::string& name, + const std::string& type, + size_t offset, bool isstatic) : + m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} + + std::string m_name; + std::string m_type; + size_t m_offset; + bool m_isstatic; }; struct Cppyy_PseudoClassInfo { Cppyy_PseudoClassInfo() {} - Cppyy_PseudoClassInfo(const std::vector& methods) : - m_methods(methods ) {} + Cppyy_PseudoClassInfo(const std::vector& methods, + long method_offset, + const std::vector& data) : + m_methods(methods), m_method_offset(method_offset), 
m_datambrs(data) {} std::vector m_methods; + long m_method_offset; + std::vector m_datambrs; }; typedef std::map Scopes_t; static Scopes_t s_scopes; -static int example01_last_static_method = 0; -static int example01_last_constructor = 0; -static int payload_methods_offset = 0; +static std::map s_methods; + +#define PUBLIC_CPPYY_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ + offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname, argtypes, #dmtype)); \ + s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + argtypes.push_back(#dmtype); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname, argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + argtypes.clear(); \ + argtypes.push_back("const "#dmtype"&"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "set_"#dmname"_c", argtypes, "void")); \ + s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ + PUBLIC_CPPYY_DATA(dmname, dmtype); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ + offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + argtypes.clear(); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_array2", argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + +#define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ + PUBLIC_CPPYY_DATA2(dmname, dmtype); \ + argtypes.push_back(#dmtype"*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_array", argtypes, #dmtype"*")); \ 
+ s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + argtypes.clear(); argtypes.push_back("void*"); \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "pass_void_array_"#key, argtypes, #dmtype"*")); \ + s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + +#define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ + data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ + (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { // class example01 -- - static long s_scope_id = 0; + static long s_scope_id = 0; + static long s_method_id = 0; { // class example01 -- s_handles["example01"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // ( 0) static double staticAddToDouble(double a) + // static double staticAddToDouble(double a) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddToDouble", argtypes, "double", kStatic)); + s_methods["static_example01::staticAddToDouble_double"] = s_method_id++; - // ( 1) static int staticAddOneToInt(int a) - // ( 2) static int staticAddOneToInt(int a, int b) + // static int staticAddOneToInt(int a) + // static int staticAddOneToInt(int a, int b) argtypes.clear(); argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAddOneToInt", argtypes, "int", kStatic)); + s_methods["static_example01::staticAddOneToInt_int_int"] = s_method_id++; - // ( 3) static int staticAtoi(const char* str) + // static int staticAtoi(const 
char* str) argtypes.clear(); argtypes.push_back("const char*"); - methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("staticAtoi", argtypes, "int", kStatic)); + s_methods["static_example01::staticAtoi_cchar*"] = s_method_id++; - // ( 4) static char* staticStrcpy(const char* strin) - methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*")); + // static char* staticStrcpy(const char* strin) + methods.push_back(Cppyy_PseudoMethodInfo("staticStrcpy", argtypes, "char*", kStatic)); + s_methods["static_example01::staticStrcpy_cchar*"] = s_method_id++; - // ( 5) static void staticSetPayload(payload* p, double d) - // ( 6) static payload* staticCyclePayload(payload* p, double d) - // ( 7) static payload staticCopyCyclePayload(payload* p, double d) + // static void staticSetPayload(payload* p, double d) + // static payload* staticCyclePayload(payload* p, double d) + // static payload staticCopyCyclePayload(payload* p, double d) argtypes.clear(); argtypes.push_back("payload*"); argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*")); - methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload")); + methods.push_back(Cppyy_PseudoMethodInfo("staticSetPayload", argtypes, "void", kStatic)); + s_methods["static_example01::staticSetPayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCyclePayload", argtypes, "payload*", kStatic)); + s_methods["static_example01::staticCyclePayload_payload*_double"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("staticCopyCyclePayload", argtypes, "payload", kStatic)); + s_methods["static_example01::staticCopyCyclePayload_payload*_double"] = s_method_id++; - // ( 8) static int getCount() - // ( 9) static void setCount(int) + // static int 
getCount() + // static void setCount(int) argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int")); + methods.push_back(Cppyy_PseudoMethodInfo("getCount", argtypes, "int", kStatic)); + s_methods["static_example01::getCount"] = s_method_id++; argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void")); + methods.push_back(Cppyy_PseudoMethodInfo("setCount", argtypes, "void", kStatic)); + s_methods["static_example01::setCount_int"] = s_method_id++; - // cut-off is used in cppyy_is_static - example01_last_static_method = methods.size(); + // example01() + // example01(int a) + argtypes.clear(); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01"] = s_method_id++; + argtypes.push_back("int"); + methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor", kConstructor)); + s_methods["example01::example01_int"] = s_method_id++; - // (10) example01() - // (11) example01(int a) - argtypes.clear(); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - argtypes.push_back("int"); - methods.push_back(Cppyy_PseudoMethodInfo("example01", argtypes, "constructor")); - - // cut-off is used in cppyy_is_constructor - example01_last_constructor = methods.size(); - - // (12) int addDataToInt(int a) + // int addDataToInt(int a) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToInt", argtypes, "int")); + s_methods["example01::addDataToInt_int"] = s_method_id++; - // (13) int addDataToIntConstRef(const int& a) + // int addDataToIntConstRef(const int& a) argtypes.clear(); argtypes.push_back("const int&"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToIntConstRef", argtypes, "int")); + s_methods["example01::addDataToIntConstRef_cint&"] = s_method_id++; - // (14) int overloadedAddDataToInt(int a, int b) + // int overloadedAddDataToInt(int a, 
int b) argtypes.clear(); argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int"] = s_method_id++; - // (15) int overloadedAddDataToInt(int a) - // (16) int overloadedAddDataToInt(int a, int b, int c) + // int overloadedAddDataToInt(int a) + // int overloadedAddDataToInt(int a, int b, int c) argtypes.clear(); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); - + s_methods["example01::overloadedAddDataToInt_int"] = s_method_id++; argtypes.push_back("int"); argtypes.push_back("int"); methods.push_back(Cppyy_PseudoMethodInfo("overloadedAddDataToInt", argtypes, "int")); + s_methods["example01::overloadedAddDataToInt_int_int_int"] = s_method_id++; - // (17) double addDataToDouble(double a) + // double addDataToDouble(double a) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToDouble", argtypes, "double")); + s_methods["example01::addDataToDouble_double"] = s_method_id++; - // (18) int addDataToAtoi(const char* str) - // (19) char* addToStringValue(const char* str) + // int addDataToAtoi(const char* str) + // char* addToStringValue(const char* str) argtypes.clear(); argtypes.push_back("const char*"); methods.push_back(Cppyy_PseudoMethodInfo("addDataToAtoi", argtypes, "int")); + s_methods["example01::addDataToAtoi_cchar*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("addToStringValue", argtypes, "char*")); + s_methods["example01::addToStringValue_cchar*"] = s_method_id++; - // (20) void setPayload(payload* p) - // (21) payload* cyclePayload(payload* p) - // (22) payload copyCyclePayload(payload* p) + // void setPayload(payload* p) + // payload* cyclePayload(payload* p) + // payload copyCyclePayload(payload* p) argtypes.clear(); argtypes.push_back("payload*"); 
methods.push_back(Cppyy_PseudoMethodInfo("setPayload", argtypes, "void")); + s_methods["example01::setPayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("cyclePayload", argtypes, "payload*")); + s_methods["example01::cyclePayload_payload*"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("copyCyclePayload", argtypes, "payload")); + s_methods["example01::copyCyclePayload_payload*"] = s_method_id++; - payload_methods_offset = methods.size(); - - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class example01 + //==================================================================== + { // class payload -- s_handles["payload"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // (23) payload(double d = 0.) + // payload(double d = 0.) std::vector argtypes; argtypes.push_back("double"); - methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor")); + methods.push_back(Cppyy_PseudoMethodInfo("payload", argtypes, "constructor", kConstructor)); + s_methods["payload::payload_double"] = s_method_id++; - // (24) double getData() + // double getData() argtypes.clear(); methods.push_back(Cppyy_PseudoMethodInfo("getData", argtypes, "double")); + s_methods["payload::getData"] = s_method_id++; - // (25) void setData(double d) + // void setData(double d) argtypes.clear(); argtypes.push_back("double"); methods.push_back(Cppyy_PseudoMethodInfo("setData", argtypes, "void")); + s_methods["payload::setData_double"] = s_method_id++; - Cppyy_PseudoClassInfo info(methods); + Cppyy_PseudoClassInfo info( + methods, s_method_id - methods.size(), std::vector()); s_scopes[(cppyy_scope_t)s_scope_id] = info; } // -- class payload + + //==================================================================== + + { // class cppyy_test_data -- + s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + + std::vector 
methods; + + // cppyy_test_data() + std::vector argtypes; + methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); + s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + + methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); + s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + + std::vector data; + PUBLIC_CPPYY_DATA2(bool, bool); + PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (uchar, unsigned char); + PUBLIC_CPPYY_DATA3(short, short, h); + PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); + PUBLIC_CPPYY_DATA3(int, int, i); + PUBLIC_CPPYY_DATA3(uint, unsigned int, I); + PUBLIC_CPPYY_DATA3(long, long, l); + PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); + PUBLIC_CPPYY_DATA (llong, long long); + PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA3(float, float, f); + PUBLIC_CPPYY_DATA3(double, double, d); + PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (voidp, void*); + + PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); + PUBLIC_CPPYY_STATIC_DATA(short, short); + PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); + PUBLIC_CPPYY_STATIC_DATA(int, int); + PUBLIC_CPPYY_STATIC_DATA(uint, unsigned int); + PUBLIC_CPPYY_STATIC_DATA(long, long); + PUBLIC_CPPYY_STATIC_DATA(ulong, unsigned long); + PUBLIC_CPPYY_STATIC_DATA(llong, long long); + PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); + PUBLIC_CPPYY_STATIC_DATA(float, float); + PUBLIC_CPPYY_STATIC_DATA(double, double); + PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); + s_scopes[(cppyy_scope_t)s_scope_id] = info; + } // -- class cppyy_test_data + } } _init; @@ -230,155 +374,387 @@ /* method/function dispatching -------------------------------------------- */ void cppyy_call_v(cppyy_method_t method, cppyy_object_t self, 
int nargs, void* args) { - switch ((long)method) { - case 5: // static void example01:;staticSetPayload(payload* p, double d) + long idx = (long)method; + if (idx == s_methods["static_example01::staticSetPayload_payload*_double"]) { assert(!self && nargs == 2); dummy::example01::staticSetPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 9: // static void example01::setCount(int) + } else if (idx == s_methods["static_example01::setCount_int"]) { assert(!self && nargs == 1); dummy::example01::setCount(((CPPYY_G__value*)args)[0].obj.in); - break; - case 20: // void example01::setPayload(payload* p); + } else if (idx == s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + assert(self && nargs == 0); + ((dummy::cppyy_test_data*)self)->destroy_arrays(); + } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_char"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["cppyy_test_data::set_short"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + assert(self && nargs == 
1); + ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_int"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_uint"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_long"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + assert(self && nargs == 1); + 
((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_float"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == s_methods["cppyy_test_data::set_float_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["cppyy_test_data::set_double"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + assert(self && nargs == 1); + ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + } else { assert(!"method unknown in cppyy_call_v"); - break; } } +unsigned char cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + unsigned char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_bool"]) { + assert(self && nargs == 0); + result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + } else { + assert(!"method unknown in cppyy_call_b"); + } + return result; +} + +char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + char result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_char"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_char(); + } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + assert(self && nargs 
== 0); + result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + } else { + assert(!"method unknown in cppyy_call_c"); + } + return result; +} + +short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + short result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_short"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_short(); + } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + assert(self && nargs == 0); + result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + } else { + assert(!"method unknown in cppyy_call_h"); + } + return result; +} + int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { int result = 0; - switch ((long)method) { - case 1: // static int example01::staticAddOneToInt(int) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddOneToInt_int"]) { assert(!self && nargs == 1); result = dummy::example01::staticAddOneToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 2: // static int example01::staticAddOneToInt(int, int) + } else if (idx == s_methods["static_example01::staticAddOneToInt_int_int"]) { assert(!self && nargs == 2); result = dummy::example01::staticAddOneToInt( ((CPPYY_G__value*)args)[0].obj.in, ((CPPYY_G__value*)args)[1].obj.in); - break; - case 3: // static int example01::staticAtoi(const char* str) + } else if (idx == s_methods["static_example01::staticAtoi_cchar*"]) { assert(!self && nargs == 1); result = dummy::example01::staticAtoi((const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 8: // static int example01::getCount() + } else if (idx == s_methods["static_example01::getCount"]) { assert(!self && nargs == 0); result = dummy::example01::getCount(); - break; - case 12: // int example01::addDataToInt(int a) + } else if (idx == s_methods["example01::addDataToInt_int"]) { assert(self && nargs == 1); result = 
((dummy::example01*)self)->addDataToInt(((CPPYY_G__value*)args)[0].obj.in); - break; - case 18: // int example01::addDataToAtoi(const char* str) + } else if (idx == s_methods["example01::addDataToAtoi_cchar*"]) { assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == s_methods["cppyy_test_data::get_int"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_int(); + } else { assert(!"method unknown in cppyy_call_i"); - break; } return result; } long cppyy_call_l(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { long result = 0; - switch ((long)method) { - case 4: // static char* example01::staticStrcpy(const char* strin) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticStrcpy_cchar*"]) { assert(!self && nargs == 1); result = (long)dummy::example01::staticStrcpy( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 6: // static payload* example01::staticCyclePayload(payload* p, double d) + } else if (idx == s_methods["static_example01::staticCyclePayload_payload*_double"]) { assert(!self && nargs == 2); result = (long)dummy::example01::staticCyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0]), ((CPPYY_G__value*)args)[1].obj.d); - break; - case 19: // char* example01::addToStringValue(const char* str) + } else if (idx == s_methods["example01::addToStringValue_cchar*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->addToStringValue( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - case 21: // payload* example01::cyclePayload(payload* p) + } else if (idx == s_methods["example01::cyclePayload_payload*"]) { assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - break; - default: + } else if (idx == 
s_methods["cppyy_test_data::get_uint"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint(); + } else if (idx == s_methods["cppyy_test_data::get_long"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_long(); + } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); + } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); + } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array(); + } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); + } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); + } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); + } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); + } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array(); + } else if (idx == 
s_methods["cppyy_test_data::get_uint_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); + } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); + } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); + } else if (idx == s_methods["cppyy_test_data::get_ulong_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); + } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_h"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + (*(short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + (*(unsigned short**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + assert(self && nargs == 1); + result = 
(long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + (*(int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + (*(unsigned int**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + (*(long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + (*(unsigned long**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + (*(float**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_array( + 
(*(double**)&((CPPYY_G__value*)args)[0])); + } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + assert(self && nargs == 1); + result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + (*(double**)&((CPPYY_G__value*)args)[0])); + } else { assert(!"method unknown in cppyy_call_l"); - break; } return result; } +long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + long long result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_llong"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_llong(); + } else if (idx == s_methods["cppyy_test_data::get_ullong"]) { + assert(self && nargs == 0); + result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + } else { + assert(!"method unknown in cppyy_call_ll"); + } + return result; +} + +float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + float result = 0; + const long idx = (long)method; + if (idx == s_methods["cppyy_test_data::get_float"]) { + assert(self && nargs == 0); + result = ((dummy::cppyy_test_data*)self)->get_float(); + } else { + assert(!"method unknown in cppyy_call_f"); + } + return result; +} + double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { double result = 0.; - switch ((long)method) { - case 0: // static double example01::staticAddToDouble(double) + const long idx = (long)method; + if (idx == s_methods["static_example01::staticAddToDouble_double"]) { assert(!self && nargs == 1); From noreply at buildbot.pypy.org Fri May 2 08:34:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:34:37 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: these are __builtin__ on cpython Message-ID: <20140502063437.6C3021C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71190:86022cc29880 Date: 2014-05-02 02:27 -0400 
http://bitbucket.org/pypy/pypy/changeset/86022cc29880/ Log: these are __builtin__ on cpython diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) From noreply at buildbot.pypy.org Fri May 2 08:34:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:34:38 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix continulet __module__ Message-ID: <20140502063438.A08B21C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71191:505508c43ec0 Date: 2014-05-02 02:29 -0400 http://bitbucket.org/pypy/pypy/changeset/505508c43ec0/ Log: fix continulet __module__ diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ 
b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), From noreply at buildbot.pypy.org Fri May 2 08:34:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 08:34:39 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix translation Message-ID: <20140502063439.B97F21C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71192:a503f5354eb5 Date: 2014-05-02 02:34 -0400 http://bitbucket.org/pypy/pypy/changeset/a503f5354eb5/ Log: fix translation diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -479,7 +479,7 @@ return self.getdictvalue(space, '__module__') else: dot = self.name.find('.') - if dot != -1: + if dot >= 0: mod = self.name[:dot] else: mod = "__builtin__" @@ -490,7 +490,7 @@ return self.name else: dot = self.name.find('.') - if dot != -1: + if dot >= 0: return self.name[dot+1:] else: return self.name From noreply at buildbot.pypy.org Fri May 2 09:31:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 09:31:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: minor changes Message-ID: <20140502073138.F36E31C01CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5211:94a4f114fe63 Date: 2014-05-02 09:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/94a4f114fe63/ Log: minor changes diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -123,15 +123,15 @@ \section{Discussion} -\paragraph{dynamic language VM problems} -XXX: -- high allocation rate (short lived 
objects)\\ -- (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) +%% \paragraph{dynamic language VM problems} +%% XXX: +%% - high allocation rate (short lived objects)\\ +%% - (don't know anything about the program that runs until it actually runs: arbitrary atomic block size) \subsection{Why is there a GIL?} The GIL is a very simple synchronisation mechanism for supporting -multi-threading in the interpreter. The basic guarantee is that the +multithreading in the interpreter. The basic guarantee is that the GIL may only be released in-between bytecode instructions. The interpreter can thus rely on complete isolation and atomicity of these instructions. Additionally, it provides the application with a @@ -139,7 +139,7 @@ on certain operations to be atomic and that they will always be executed in the order in which they appear in the code. While depending on this may not always be a good idea, it is done in -practice. A solution replacing the GIL should therefore uphold these +practice. A GIL-replacement should therefore uphold these guarantees, while preferably also be as easily implementable as a GIL for the interpreter. [xxx mention that the interpreter is typically very large and maintained @@ -151,7 +151,7 @@ thread-safe can voluntarily release the GIL themselves in order to still provide some parallelism. This is done for example for potentially long I/O operations. Consequently, I/O-bound, -multi-threaded applications can actually parallelise to some +multithreaded applications can actually parallelise to some degree. Again, a potential solution should be able to integrate with external libraries with similar ease. We will however focus our argumentation more on running code in the interpreted language in @@ -277,9 +277,7 @@ recently gained a lot of popularity by its introduction in common desktop CPUs from Intel (Haswell generation). 
-\paragraph{HTM} - -HTM provides us with transactions like any TM system does. It can +\paragraph{HTM} provides us with transactions like any TM system does. It can be used as a direct replacement for the GIL. However, as is common with hardware-only solutions, there are quite a few limitations that can not be lifted easily. For this comparison, we look at @@ -304,16 +302,14 @@ synchronisation mechanism for the application. It is not possible in general to expose the hardware-transactions to the application in the form of atomic blocks because that would require much -longer transactions. +longer transactions. %% - false-sharing on cache-line level\\ %% - limited capacity (caches, undocumented)\\ %% - random aborts (haswell)\\ %% - generally: transaction-length limited (no atomic blocks) -\paragraph{STM} - -STM provides all the same benefits as HTM except for its performance. +\paragraph{STM} provides all the same benefits as HTM except for its performance. It is not unusual for the overhead introduced by STM to be between 100\% to even 1000\%. While STM systems often scale very well to a big number of threads and eventually overtake the single-threaded @@ -347,9 +343,9 @@ & \textbf{GIL} & \textbf{Fine-grained locking} & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ \hline - Performance (single-threaded) & ++ & + & ++ & ++ & -{-} \\ + Performance (single threaded) & ++ & + & ++ & ++ & -{-} \\ \hline - Performance (multi-threaded) & -{-} & + & + & + & + \\ + Performance (multithreaded) & -{-} & + & + & + & + \\ \hline Existing applications & ++ & ++ & -{-} & ++ & ++ \\ \hline @@ -357,7 +353,7 @@ \hline Implementation & ++ & - & ++ & ++ & ++ \\ \hline - External libra\-ries & ++ & ++ & ++ & ++ & ++ \\ + External libraries & ++ & ++ & ++ & ++ & ++ \\ \hline \end{tabular} \caption{Comparison between the approaches (-{-}/-/o/+/++)} @@ -405,7 +401,7 @@ \acks -Acknowledgments... +Acknowledgements... % We recommend abbrvnat bibliography style. 
@@ -423,4 +419,3 @@ \end{document} - From noreply at buildbot.pypy.org Fri May 2 09:39:47 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 09:39:47 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: additions Message-ID: <20140502073947.522A71C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5212:884401952998 Date: 2014-05-02 09:39 +0200 http://bitbucket.org/pypy/extradoc/changeset/884401952998/ Log: additions diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -206,7 +206,12 @@ access the same objects in parallel. What we lose instead is the simplicity of the GIL approach. With every additional lock, the likeliness of deadlocks grows, as well as the overhead that acquiring -and releasing locks produces. +and releasing locks produces. The former means that sometimes it is +necessary to fall back to less fine-grained locking, preventing some +potential parallelism in order to keep the complexity manageable. +The latter means that we lose a bit of performance in the +single-threaded case compared to the GIL, which requires much less +acquire-release operations. Jython\footnote{www.jython.org} is one project that implements an interpreter for Python on the JVM\footnote{Java Virtual Machine} and @@ -215,7 +220,7 @@ carefully placed locks. Since there is no central location, the complexity of the implementation is quite a bit greater compared to using a GIL. Integrating external, non-thread-safe libraries should -however be very simple too. One could simply use one lock per library +however be very simple too. One could simply use one lock per library to avoid this issue. In the end, fine-grained locking can transparently replace the GIL @@ -230,7 +235,7 @@ %% - (there are some semantic differences, right? 
not given perfect lock-placement, but well) %% ( http://www.jython.org/jythonbook/en/1.0/Concurrency.html ) -\subsubsection{Shared-Nothing / multiple processes} +\subsubsection{Shared-Nothing} There are also approaches that work around the GIL instead of trying to replace it. If an application can be split into completely From noreply at buildbot.pypy.org Fri May 2 10:11:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 10:11:19 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: start citing things Message-ID: <20140502081119.C398F1C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5213:25fe21890b67 Date: 2014-05-02 10:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/25fe21890b67/ Log: start citing things diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -10,6 +10,7 @@ \usepackage[utf8]{inputenc} \usepackage{array} +\usepackage{hyperref} \usepackage{amsmath} @@ -213,7 +214,7 @@ single-threaded case compared to the GIL, which requires much less acquire-release operations. -Jython\footnote{www.jython.org} is one project that implements an +Jython\cite{webjython} is one project that implements an interpreter for Python on the JVM\footnote{Java Virtual Machine} and that uses fine-grained locking to correctly synchronise the interpreter. For a language like Python, one needs quite a few, @@ -282,46 +283,48 @@ recently gained a lot of popularity by its introduction in common desktop CPUs from Intel (Haswell generation). -\paragraph{HTM} provides us with transactions like any TM system does. It can -be used as a direct replacement for the GIL. However, as is common -with hardware-only solutions, there are quite a few limitations -that can not be lifted easily. For this comparison, we look at -the implementation of Intel in recent Haswell generation CPUs. 
+\paragraph{HTM} provides us with transactions like any TM system does. +It can be used as a direct replacement for the GIL. However, as is +common with hardware-only solutions, there are quite a few limitations +that can not be lifted easily. For this comparison, we look at the +implementation of Intel in recent Haswell generation CPUs. HTM in these CPUs works on the level of caches. This has a few consequences like false-sharing on the cache-line level, and most importantly it limits the amount of memory that can be accessed within a transaction. This transaction-length limitation makes it necessary to have a fallback in place in case this limit is reached. In recent -attempts, the usual fallback is the GIL (XXX: cite). The current -generation of HTM hits this limit very often for our use case (XXX: -cite ruby GIL paper) and therefore does not parallelise that well. +attempts, the usual fallback is the GIL\cite{odaira14}. In our +experiments, the current generation of HTM proved to be very fragile +and thus needing the fallback very often. Consequently, scalability +suffered a lot from this. -The performance of HTM is pretty good (XXX: cite again...) as it does -not introduce much overhead. And it can transparently parallelise -existing applications to some degree. The implementation is very -straight-forward because it directly replaces the GIL in a central -place. HTM is also directly compatible with any external library that -needs to be integrated and synchronised for use in multiple -threads. The one thing that is missing is support for a better -synchronisation mechanism for the application. It is not possible -in general to expose the hardware-transactions to the application -in the form of atomic blocks because that would require much -longer transactions. +The performance of HTM is pretty good as it does not introduce much +overhead ($<40\%$ overhead\cite{odaira14}). And it can transparently +parallelise existing applications to some degree. 
The implementation +is very straight-forward because it directly replaces the GIL in a +central place. HTM is also directly compatible with any external +library that needs to be integrated and synchronised for use in +multiple threads. The one thing that is missing is support for a +better synchronisation mechanism for the application. It is not +possible in general to expose the hardware-transactions to the +application in the form of atomic blocks because that would require +much longer transactions. %% - false-sharing on cache-line level\\ %% - limited capacity (caches, undocumented)\\ %% - random aborts (haswell)\\ %% - generally: transaction-length limited (no atomic blocks) -\paragraph{STM} provides all the same benefits as HTM except for its performance. -It is not unusual for the overhead introduced by STM to be between -100\% to even 1000\%. While STM systems often scale very well to a big -number of threads and eventually overtake the single-threaded -execution, they often provide no benefits at all for low numbers of -threads (1-8). There are some attempts (XXX: cite fastlane) that can -reduce the overhead a lot, but also scale very badly so that their -benefit on more than one thread is little. +\paragraph{STM} provides all the same benefits as HTM except for its +performance. It is not unusual for the overhead introduced by STM to +be between 100\% to even 1000\% \cite{cascaval08,drago11}. While STM +systems often scale very well to a big number of threads and +eventually overtake the single-threaded execution, they often provide +no benefits at all for low numbers of threads (1-8). There are some +attempts \cite{warmhoff13} that can reduce the overhead a lot, but +also scale very badly so that their benefit on more than one thread is +little. However, STM compared to HTM does not suffer from the same restricting limitations. Transactions can be arbitrarily long. 
This makes it @@ -418,7 +421,30 @@ \softraggedright \bibitem[Smith et~al.(2009)Smith, Jones]{smith02} -P. Q. Smith, and X. Y. Jones. ...reference text... + P. Q. Smith, and X. Y. Jones. ...reference text... + +\bibitem{webjython} + The Jython Project, \url{www.jython.org} + +\bibitem{odaira14} + Odaira, Rei, Jose G. Castanos, and Hisanobu Tomari. "Eliminating + global interpreter locks in Ruby through hardware transactional + memory." \emph{Proceedings of the 19th ACM SIGPLAN symposium on + Principles and practice of parallel programming.} ACM, 2014. + +\bibitem{warmhoff13} + Wamhoff, Jons-Tobias, et al. "FastLane: improving performance of + software transactional memory for low thread counts." + \emph{Proceedings of the 18th ACM SIGPLAN symposium on Principles + and practice of parallel programming.} ACM, 2013. + +\bibitem{drago11} + Dragojević, Aleksandar, et al. "Why STM can be more than a research + toy." \emph{Communications of the ACM} 54.4 (2011): 70-77. + +\bibitem{cascaval08} + Cascaval, Calin, et al. "Software transactional memory: Why is it + only a research toy?." \emph{Queue} 6.5 (2008): 40. \end{thebibliography} From noreply at buildbot.pypy.org Fri May 2 10:17:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 10:17:04 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Avoid using the word 'hardware' to describe our purely-STM system. Message-ID: <20140502081704.22F751C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5214:2c6c55e951e5 Date: 2014-05-02 10:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/2c6c55e951e5/ Log: Avoid using the word 'hardware' to describe our purely-STM system. diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -383,9 +383,10 @@ synchronise memory accesses using atomic blocks. Unfortunately, STM has a big performance problem. 
One way to approach -this problem is to make STM systems that use the available hardware -better. We are currently working on a STM system that makes use of -several hardware features like virtual memory and memory segmentation. +this problem is to make STM systems that make better use of low-level +features in existing OS kernels. +We are currently working on a STM system that makes use of +several such features like virtual memory and memory segmentation. We further tailor the system to the discussed use case which gives us an advantage over other STM systems that are more general. With this approach, initial results suggest that we can keep the overhead of STM From noreply at buildbot.pypy.org Fri May 2 10:26:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 10:26:06 +0200 (CEST) Subject: [pypy-commit] pypy default: backout da193f0b119d. Done in this way, "print()" will print an empty Message-ID: <20140502082606.943871C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71193:0bdf01779070 Date: 2014-05-02 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/0bdf01779070/ Log: backout da193f0b119d. Done in this way, "print()" will print an empty tuple in Python 2. Is this really necessary for the py3k branch? 
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.__code__: + if entry.code == f1.func_code: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.__code__] - ebar = entries[bar.__code__] + efoo = entries[foo.func_code] + ebar = entries[bar.func_code] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print('NOT FOUND: %s' % pattern.rstrip('\n')) - print('--- GOT ---') - print(got) - print() - print('--- EXPECTED ---') - print(expected) + print 'NOT FOUND:', pattern.rstrip('\n') + print '--- GOT ---' + print got + print + print '--- EXPECTED ---' + print expected assert False assert not lines finally: From noreply at buildbot.pypy.org Fri May 2 10:32:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 10:32:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip this line of the test on 32-bit, with an explanation about why. Message-ID: <20140502083234.737631C0A66@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71194:0e00b9b987eb Date: 2014-05-02 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/0e00b9b987eb/ Log: Skip this line of the test on 32-bit, with an explanation about why. 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestCProfile(BaseTestPyPyC): @@ -26,6 +26,10 @@ for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - assert ' call(' not in repr(loop.ops_by_id(method)) + # on 32-bit, there is f1=read_timestamp(); ...; + # f2=read_timestamp(); f3=call(llong_sub,f1,f2) + # which should turn into a single PADDQ/PSUBQ + if sys.maxint != 2147483647: + assert ' call(' not in repr(loop.ops_by_id(method)) assert ' call_may_force(' not in repr(loop.ops_by_id(method)) assert ' cond_call(' in repr(loop.ops_by_id(method)) From noreply at buildbot.pypy.org Fri May 2 10:43:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 10:43:16 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more random citescites... Message-ID: <20140502084316.52E331C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5215:72d2204d81d3 Date: 2014-05-02 10:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/72d2204d81d3/ Log: more random citescites... diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -284,7 +284,7 @@ desktop CPUs from Intel (Haswell generation). \paragraph{HTM} provides us with transactions like any TM system does. -It can be used as a direct replacement for the GIL. However, as is +It can be used as a direct replacement for the GIL\cite{nicholas06,odaira14,fuad10}. However, as is common with hardware-only solutions, there are quite a few limitations that can not be lifted easily. 
For this comparison, we look at the implementation of Intel in recent Haswell generation CPUs. @@ -294,7 +294,7 @@ importantly it limits the amount of memory that can be accessed within a transaction. This transaction-length limitation makes it necessary to have a fallback in place in case this limit is reached. In recent -attempts, the usual fallback is the GIL\cite{odaira14}. In our +attempts, the usual fallback is the GIL\cite{odaira14,fuad10}. In our experiments, the current generation of HTM proved to be very fragile and thus needing the fallback very often. Consequently, scalability suffered a lot from this. @@ -387,12 +387,12 @@ better. We are currently working on a STM system that makes use of several hardware features like virtual memory and memory segmentation. We further tailor the system to the discussed use case which gives us -an advantage over other STM systems that are more general. With this +an advantage over other STM systems that are more general. With this approach, initial results suggest that we can keep the overhead of STM -already below 50\%. A hybrid TM system, which also uses HTM to -accelerate certain tasks, looks like a very promising direction of -research too. In general we believe that further work to reduce the -overhead of STM is very worthwhile. +below 50\%. A hybrid TM system, which also uses HTM to accelerate +certain tasks, looks like a very promising direction of research +too. In general we believe that further work to reduce the overhead of +STM is very worthwhile. @@ -446,6 +446,18 @@ Cascaval, Calin, et al. "Software transactional memory: Why is it only a research toy?." \emph{Queue} 6.5 (2008): 40. +\bibitem{nicholas06} + Nicholas Riley and Craig Zilles. 2006. Hardware tansactional memory + support for lightweight dynamic language evolution. \emph{In + Companion to the 21st ACM SIGPLAN symposium on Object-oriented + programming systems, languages, and applications} (OOPSLA + '06). 
ACM, New York, NY, USA + +\bibitem{fuad10} + Fuad Tabba. 2010. Adding concurrency in python using a commercial + processor's hardware transactional memory support. \emph{SIGARCH + Comput. Archit. News 38}, 5 (April 2010) + \end{thebibliography} From noreply at buildbot.pypy.org Fri May 2 10:43:17 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 10:43:17 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20140502084317.6BF9A1C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5216:d7be425514e9 Date: 2014-05-02 10:43 +0200 http://bitbucket.org/pypy/extradoc/changeset/d7be425514e9/ Log: merge diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -383,9 +383,10 @@ synchronise memory accesses using atomic blocks. Unfortunately, STM has a big performance problem. One way to approach -this problem is to make STM systems that use the available hardware -better. We are currently working on a STM system that makes use of -several hardware features like virtual memory and memory segmentation. +this problem is to make STM systems that make better use of low-level +features in existing OS kernels. +We are currently working on a STM system that makes use of +several such features like virtual memory and memory segmentation. We further tailor the system to the discussed use case which gives us an advantage over other STM systems that are more general. 
With this approach, initial results suggest that we can keep the overhead of STM From noreply at buildbot.pypy.org Fri May 2 10:49:30 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 10:49:30 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: remove example citation Message-ID: <20140502084930.DDFB41C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5217:e020fe62fcfc Date: 2014-05-02 10:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/e020fe62fcfc/ Log: remove example citation diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -421,9 +421,6 @@ \begin{thebibliography}{} \softraggedright -\bibitem[Smith et~al.(2009)Smith, Jones]{smith02} - P. Q. Smith, and X. Y. Jones. ...reference text... - \bibitem{webjython} The Jython Project, \url{www.jython.org} From noreply at buildbot.pypy.org Fri May 2 10:54:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 10:54:59 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: typo Message-ID: <20140502085459.131BB1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5218:616a76d3b5b9 Date: 2014-05-02 10:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/616a76d3b5b9/ Log: typo diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -445,7 +445,7 @@ only a research toy?." \emph{Queue} 6.5 (2008): 40. \bibitem{nicholas06} - Nicholas Riley and Craig Zilles. 2006. Hardware tansactional memory + Nicholas Riley and Craig Zilles. 2006. Hardware transactional memory support for lightweight dynamic language evolution. 
\emph{In Companion to the 21st ACM SIGPLAN symposium on Object-oriented programming systems, languages, and applications} (OOPSLA From noreply at buildbot.pypy.org Fri May 2 11:35:43 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 11:35:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add something about automatic barrier placement Message-ID: <20140502093543.747AB1D2BF6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5219:511162cc1095 Date: 2014-05-02 11:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/511162cc1095/ Log: add something about automatic barrier placement diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -337,6 +337,14 @@ parallel programming forward. Together with sequential consistency it provides a lot of simplification for parallel applications. +While one can argue that STM requires the insertion of read and write +barriers in the whole program, this can be done automatically and +locally by a program transformation\cite{felber07}. There are attempts +to do the same for fine-grained locking\cite{bill06} but they require +a whole program analysis since locks are inherently non-composable. +The effectiveness of these approaches still has to be proven for our +use case. + %% - overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) %% (FastLane: low overhead, not much gain)\\ %% - unlimited transaction length (easy atomic blocks) @@ -456,6 +464,17 @@ processor's hardware transactional memory support. \emph{SIGARCH Comput. Archit. News 38}, 5 (April 2010) +\bibitem{felber07} + Felber, Pascal, et al. "Transactifying applications using an open + compiler framework." \emph{TRANSACT}, August (2007): 4-6. + +\bibitem{bill06} + Bill McCloskey, Feng Zhou, David Gay, and Eric + Brewer. 2006. 
Autolocker: synchronization inference for atomic + sections. \emph{In Conference record of the 33rd ACM SIGPLAN-SIGACT + symposium on Principles of programming languages (POPL '06)}. ACM, + New York, NY, USA + \end{thebibliography} From noreply at buildbot.pypy.org Fri May 2 12:52:21 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Fri, 2 May 2014 12:52:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk 64bit-c2: Merged default Message-ID: <20140502105221.605531C00B9@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: 64bit-c2 Changeset: r796:e6c406381c28 Date: 2014-04-24 14:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e6c406381c28/ Log: Merged default diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -12,3 +12,5 @@ versions coglinux *.orig +spy-*.log +SDL.dll diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,12 +15,14 @@ that create W_PointersObjects of correct size with attached shadows. 
""" import sys, weakref -from spyvm import constants, error, system +from spyvm import constants, error, version, system +from spyvm.version import elidable_for_version from rpython.rlib import rrandom, objectmodel, jit, signature from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint, r_int +from rpython.rlib.debug import make_sure_not_resized from rpython.tool.pairtype import extendabletype -from rpython.rlib.objectmodel import instantiate, compute_hash +from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper @@ -448,7 +450,7 @@ def __str__(self): if isinstance(self, W_PointersObject) and self.has_shadow(): - return self._shadow.getname() + return self._get_shadow().getname() else: name = None if self.has_class(): @@ -488,15 +490,20 @@ class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" - _attrs_ = ['_shadow'] + _attrs_ = ['shadow'] + + def changed(self): + # This is invoked when an instance-variable is changed. + # Kept here in case it might be usefull in the future. 
+ pass - _shadow = None # Default value + shadow = None # Default value @jit.unroll_safe def __init__(self, space, w_class, size): """Create new object with size = fixed + variable size.""" W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._shadow = None # Default value + self.store_shadow(None) def fillin(self, space, g_self): from spyvm.fieldtypes import fieldtypes_of @@ -514,12 +521,12 @@ def fetch(self, space, n0): if self.has_shadow(): - return self._shadow.fetch(n0) + return self._get_shadow().fetch(n0) return self._fetch(n0) def store(self, space, n0, w_value): if self.has_shadow(): - return self._shadow.store(n0, w_value) + return self._get_shadow().store(n0, w_value) return self._store(n0, w_value) def varsize(self, space): @@ -533,13 +540,17 @@ def size(self): if self.has_shadow(): - return self._shadow.size() + return self._get_shadow().size() return self.basic_size() def store_shadow(self, shadow): - assert self._shadow is None or self._shadow is shadow - self._shadow = shadow + assert self.shadow is None or self.shadow is shadow + self.shadow = shadow + self.changed() + def _get_shadow(self): + return self.shadow + @objectmodel.specialize.arg(2) def attach_shadow_of_class(self, space, TheClass): shadow = TheClass(space, self) @@ -549,7 +560,7 @@ @objectmodel.specialize.arg(2) def as_special_get_shadow(self, space, TheClass): - shadow = self._shadow + shadow = self._get_shadow() if not isinstance(shadow, TheClass): if shadow is not None: raise DetachingShadowError(shadow, TheClass) @@ -568,7 +579,7 @@ # Should only be used during squeak-image loading. 
def as_class_get_penumbra(self, space): from spyvm.shadow import ClassShadow - s_class = self._shadow + s_class = self._get_shadow() if s_class is None: s_class = ClassShadow(space, self) self.store_shadow(s_class) @@ -587,7 +598,7 @@ def as_context_get_shadow(self, space): from spyvm.shadow import ContextPartShadow # XXX TODO should figure out itself if its method or block context - if self._shadow is None: + if self._get_shadow() is None: if ContextPartShadow.is_block_context(self, space): return self.as_blockcontext_get_shadow(space) return self.as_methodcontext_get_shadow(space) @@ -606,17 +617,19 @@ return self.as_special_get_shadow(space, ObserveeShadow) def has_shadow(self): - return self._shadow is not None + return self._get_shadow() is not None def become(self, w_other): if not isinstance(w_other, W_AbstractPointersObject): return False # switching means also switching shadows - self._shadow, w_other._shadow = w_other._shadow, self._shadow + self.shadow, w_other.shadow = w_other.shadow, self.shadow # shadow links are in both directions -> also update shadows - if self.has_shadow(): self._shadow._w_self = self - if w_other.has_shadow(): w_other._shadow._w_self = w_other + if self.shadow is not None: self.shadow._w_self = self + if w_other.shadow is not None: w_other.shadow._w_self = w_other W_AbstractObjectWithClassReference._become(self, w_other) + self.changed() + w_other.changed() return True @jit.elidable @@ -633,24 +646,27 @@ from spyvm.fieldtypes import fieldtypes_of_length """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) - vars = self._vars = [None] * size + vars = [None] * size + self.set_vars(vars) self.fieldtypes = fieldtypes_of_length(self.s_class, size) for i in range(size): # do it by hand for the JIT's sake vars[i] = w_nil - + + def set_vars(self, new_vars): + self._vars = new_vars + make_sure_not_resized(self._vars) + def fillin(self, space, g_self): 
W_AbstractPointersObject.fillin(self, space, g_self) from spyvm.fieldtypes import fieldtypes_of - self._vars = g_self.get_pointers() + self.set_vars(g_self.get_pointers()) self.fieldtypes = fieldtypes_of(self) def _fetch(self, n0): - # return self._vars[n0] fieldtypes = jit.promote(self.fieldtypes) return fieldtypes.fetch(self, n0) def _store(self, n0, w_value): - # self._vars[n0] = w_value fieldtypes = jit.promote(self.fieldtypes) return fieldtypes.store(self, n0, w_value) @@ -671,7 +687,7 @@ def clone(self, space): w_result = W_PointersObject(self.space, self.getclass(space), len(self._vars)) - w_result._vars = [self.fetch(space, i) for i in range(len(self._vars))] + w_result.set_vars([self.fetch(space, i) for i in range(len(self._vars))]) return w_result def fieldtype(self): @@ -996,6 +1012,8 @@ _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] + pixelbuffer = None + @staticmethod def create(space, w_class, size, depth, display): if depth < 8: @@ -1202,11 +1220,11 @@ if len(self.literals) > 0: w_candidate = self.literals[-1] if isinstance(w_candidate, W_PointersObject): - c_shadow = w_candidate._shadow + c_shadow = w_candidate._get_shadow() if c_shadow is None and w_candidate.size() >= 2: w_class = w_candidate._fetch(1) if isinstance(w_class, W_PointersObject): - d_shadow = w_class._shadow + d_shadow = w_class._get_shadow() if isinstance(d_shadow, shadow.ClassShadow): classname = d_shadow.getname() elif isinstance(shadow, shadow.ClassShadow): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, shadow, wrapper, system +from spyvm import constants, model, shadow, wrapper, system, version from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, 
specialize @@ -79,7 +79,7 @@ w_Class = self.classtable["w_Class"] s_Metaclass = self.classtable["w_Metaclass"].as_class_get_penumbra(self) # XXX - proto_shadow = w_ProtoObjectClass._shadow + proto_shadow = w_ProtoObjectClass.shadow proto_shadow.store_w_superclass(w_Class) # at this point, all classes that still lack a w_class are themselves # metaclasses @@ -335,7 +335,7 @@ # XXX s = instantiate(shadow.ClassShadow) s.space = space - s.version = shadow.Version() + s.version = version.Version() s._w_self = w_class s.subclass_s = {} s._s_superclass = None diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1,17 +1,10 @@ import weakref -from spyvm import model, constants, error, wrapper +from spyvm import model, constants, error, wrapper, version +from spyvm.version import elidable_for_version, constant_for_version from rpython.tool.pairtype import extendabletype from rpython.rlib import rarithmetic, jit - -def make_elidable_after_versioning(func): - @jit.elidable - def elidable_func(self, version, *args): - return func(self, *args) - def meth(self, *args): - jit.promote(self) - version = jit.promote(self.version) - return elidable_func(self, version, *args) - return meth +from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib.debug import make_sure_not_resized class AbstractShadow(object): """A shadow is an optional extra bit of information that @@ -38,10 +31,13 @@ class AbstractCachingShadow(AbstractShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] + import_from_mixin(version.VersionMixin) + version = None + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) - self.version = Version() + self.changed() def attach_shadow(self): self.w_self().store_shadow(self) @@ -59,11 +55,6 @@ AbstractShadow.store(self, n0, w_value) self.update() - def change(self): - self.version = Version() - -class Version: - pass # ____________________________________________________________ 
POINTERS = 0 @@ -87,7 +78,7 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] - + def __init__(self, space, w_self): # fields added here should also be in objspace.py:56ff, 300ff self.name = '' @@ -263,12 +254,12 @@ " True if instances of this class have data stored as numerical bytes " return self.format == BYTES - @make_elidable_after_versioning + @constant_for_version def isvariable(self): " True if instances of this class have indexed inst variables " return self.instance_varsized - @make_elidable_after_versioning + @constant_for_version def instsize(self): " Number of named instance variables for each instance of this class " return self._instance_size @@ -293,7 +284,7 @@ del self.subclass_s[s_other] def changed(self): - self.superclass_changed(Version()) + self.superclass_changed(version.Version()) # this is done, because the class-hierarchy contains cycles def superclass_changed(self, version): @@ -308,7 +299,7 @@ def __repr__(self): return "" % (self.name or '?',) - @make_elidable_after_versioning + @constant_for_version def lookup(self, w_selector): look_in_shadow = self while look_in_shadow is not None: @@ -646,6 +637,7 @@ stacksize = self.stackend() - self.stackstart() tempsize = self.tempsize() self._temps_and_stack = [None] * (stacksize + tempsize) + make_sure_not_resized(self._temps_and_stack) for i in range(tempsize): self._temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element @@ -1033,6 +1025,7 @@ "argsize", "islarge", "w_compiledin", "version"] _immutable_fields_ = ["version?", "_w_self"] + import_from_mixin(version.VersionMixin) def __init__(self, w_compiledmethod): self._w_self = w_compiledmethod @@ -1041,11 +1034,11 @@ def w_self(self): return self._w_self - @make_elidable_after_versioning + @constant_for_version def getliteral(self, index): return self.literals[index] - @make_elidable_after_versioning + 
@constant_for_version def compute_frame_size(self): # From blue book: normal mc have place for 12 temps+maxstack # mc for methods with islarge flag turned on 32 @@ -1058,7 +1051,7 @@ def update(self): w_compiledmethod = self._w_self - self.version = Version() + self.changed() self.bytecode = "".join(w_compiledmethod.bytes) self.bytecodeoffset = w_compiledmethod.bytecodeoffset() self.literalsize = w_compiledmethod.getliteralsize() @@ -1081,11 +1074,11 @@ association = wrapper.AssociationWrapper(None, w_association) self.w_compiledin = association.value() - @make_elidable_after_versioning + @constant_for_version def tempsize(self): return self._tempsize - @make_elidable_after_versioning + @constant_for_version def primitive(self): return self._primitive @@ -1095,26 +1088,20 @@ space, self, receiver, arguments, sender) return s_new - @make_elidable_after_versioning + @constant_for_version def getbytecode(self, pc): return self.bytecode[pc] class CachedObjectShadow(AbstractCachingShadow): + @elidable_for_version def fetch(self, n0): - jit.promote(self) - version = self.version - jit.promote(version) - return self.safe_fetch(n0, version) - - @jit.elidable - def safe_fetch(self, n0, version): - assert version is self.version return self._w_self._fetch(n0) def store(self, n0, w_value): - self.version = Version() - return self._w_self._store(n0, w_value) + res = self._w_self._store(n0, w_value) + self.changed() + return res def update(self): pass diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -308,7 +308,7 @@ for chunk in self.chunks.itervalues(): casted = chunk.g_object.w_object if isinstance(casted, model.W_PointersObject) and casted.has_shadow(): - casted._shadow.update() + casted.shadow.update() def init_compactclassesarray(self): """ from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext 
MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """ diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -2,8 +2,8 @@ import os # TODO: -from pypy.tool.jitlogparser.parser import SimpleParser, Op -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.parser import SimpleParser, Op +from rpython.tool.jitlogparser.storage import LoopStorage from rpython.jit.metainterp.resoperation import opname from rpython.jit.tool import oparser diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -368,6 +368,123 @@ jump(p0, p3, p8, i557, p538, i562, p18, i545, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, p148, p717, i158, p156, p718, i165, p163, p146, i715, i179, p178, p719, i197, p188, p213, i221, p220, p228, p140, p242, i250, i252, i282, i293, i328, i315, i349, i510, p509, p538, p521, descr=TargetToken(169555520))] """) + # TODO: there shouldnt be allocations in this + def test_range_asOrderedCollection(self, spy, tmpdir): + traces = self.run(spy, tmpdir, + """ + (1 to: 10000) asOrderedCollection. 
+ """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + p173 = getarrayitem_gc(p53, 1, descr=), + i174 = getfield_gc_pure(p173, descr=), + i175 = int_ge(i174, i167), + guard_true(i175, descr=), + cond_call(i75, 4712800, p67, descr=), + cond_call(i103, 4712800, p91, descr=), + cond_call(i103, 4712800, p91, descr=), + p176 = getarrayitem_gc(p105, 0, descr=), + cond_call(i103, 4712800, p91, descr=), + p178 = new_with_vtable(ConstClass(W_SmallInteger)), + setfield_gc(p178, i167, descr=), + setarrayitem_gc(p105, 1, p178, descr=), + setarrayitem_gc(p79, 0, p176, descr=), + setfield_gc(p67, 2, descr=), + setfield_gc(p67, 15, descr=), + setfield_gc(p67, p0, descr=), + setfield_gc(ConstPtr(ptr81), i88, descr=), + setarrayitem_gc(p79, 1, p178, descr=), + guard_class(p176, 6040288, descr=), + p179 = getfield_gc(p176, descr=), + guard_value(p179, ConstPtr(ptr117), descr=), + p180 = getfield_gc(p176, descr=), + setarrayitem_gc(p79, 0, ConstPtr(null), descr=), + setfield_gc(p67, 0, descr=), + setfield_gc(ConstPtr(ptr81), i129, descr=), + setarrayitem_gc(p79, 1, ConstPtr(null), descr=), + guard_isnull(p180, descr=), + p183 = getfield_gc(p176, descr=), + guard_value(p183, ConstPtr(ptr133), descr=), + p184 = getfield_gc(p176, descr=), + p185 = getarrayitem_gc(p184, 2, descr=), + p186 = getarrayitem_gc(p184, 0, descr=), + guard_class(p186, 6040288, descr=), + p187 = getfield_gc(p186, descr=), + guard_value(p187, ConstPtr(ptr144), descr=), + p188 = getfield_gc(p186, descr=), + guard_isnull(p188, descr=), + p189 = getfield_gc(p186, descr=), + i190 = arraylen_gc(p189, descr=), + i191 = getfield_gc_pure(p185, descr=), + i192 = int_eq(i191, i190), + guard_false(i192, descr=), + i193 = int_add_ovf(i191, 1), + guard_no_overflow(descr=), + i194 = int_ge(i191, 0), + guard_true(i194, descr=), + i195 = int_lt(i191, i190), + guard_true(i195, descr=), + p196 = getfield_gc(p186, descr=), + guard_value(p196, ConstPtr(ptr156), descr=), + p197 = 
new_with_vtable(ConstClass(W_SmallInteger)), + setfield_gc(p197, i193, descr=), + setarrayitem_gc(p184, 2, p197, descr=), + setarrayitem_gc(p189, i191, p178, descr=), + p198 = getarrayitem_gc(p53, 2, descr=), + i199 = getfield_gc_pure(p198, descr=), + setarrayitem_gc(p79, 0, ConstPtr(null), descr=), + setfield_gc(p67, -1, descr=), + setfield_gc(p67, ConstPtr(null), descr=), + setfield_gc(ConstPtr(ptr81), i84, descr=), + i200 = int_add_ovf(i167, i199), + guard_no_overflow(descr=), + i201 = int_sub(i170, 8), + setfield_gc(ConstPtr(ptr81), i201, descr=), + i202 = int_le(i201, 0), + guard_false(i202, descr=), + i203 = arraylen_gc(p53, descr=), + i204 = arraylen_gc(p79, descr=), + i205 = arraylen_gc(p105, descr=), + jump(p0, p3, p6, i200, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p53, i75, p67, i103, p91, p105, p79, i88, i90, i129, i84, i201, descr=TargetToken(45055856))] + """) + + def test_indexOf(self, spy, tmpdir): + traces = self.run(spy, tmpdir, + """ + (1 to: 10000000) asOrderedCollection indexOf: 9999999. 
+ """) + # First loop: asOrderedCollection, second loop: makeRoomAtLast + self.assert_matches(traces[2].loop, """ + guard_not_invalidated(descr=), + i127 = int_le(i121, i61), + guard_true(i127, descr=), + setfield_gc(ConstPtr(ptr74), i81, descr=), + i128 = int_add_ovf(i121, i91), + guard_no_overflow(descr=), + i129 = int_sub(i128, 1), + i130 = int_gt(i129, i97), + guard_false(i130, descr=), + i131 = int_sub(i129, 1), + i132 = int_ge(i131, 0), + guard_true(i132, descr=), + i133 = int_lt(i131, i110), + guard_true(i133, descr=), + p134 = getarrayitem_gc(p109, i131, descr=), + setfield_gc(ConstPtr(ptr74), i77, descr=), + guard_nonnull_class(p134, ConstClass(W_SmallInteger), descr=), + i135 = getfield_gc_pure(p134, descr=), + i136 = int_eq(i135, i118), + guard_false(i136, descr=), + i137 = int_add_ovf(i121, 1), + guard_no_overflow(descr=), + i138 = int_sub(i124, 6), + setfield_gc(ConstPtr(ptr74), i138, descr=), + i139 = int_le(i138, 0), + guard_false(i139, descr=), + i140 = arraylen_gc(p88, descr=), + jump(p0, p3, p6, p8, p10, i137, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i61, i81, i91, p63, p90, i67, i97, p96, p100, i110, p109, i77, i118, i138, p88, descr=TargetToken(45201344))] + """) + @py.test.mark.skipif("'just dozens of long traces'") def test_bitblt_draw_windows(self, spy, tmpdir): # This used to have a call to array comparison in it diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -57,12 +57,12 @@ assert space.get_special_selector(methname) is symbol s_class.installmethod(symbol, prim_meth) - assert space.w_nil._shadow is None + assert space.w_nil.shadow is None try: func(active_context) if active_context else func() finally: # Uninstall those methods: - assert space.w_nil._shadow is None + assert space.w_nil.shadow is None for (w_class, _, _, methname) in methods: s_class = w_class.as_class_get_shadow(space) 
s_class.update() diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -30,8 +30,7 @@ initialize_class(w("string").getclass(tools.space)) def perform_primitive(rcvr, w_selector, *args): - - code = rcvr.getclass(space)._shadow.lookup(w_selector).primitive() + code = rcvr.getclass(space).shadow.lookup(w_selector).primitive() assert code func = primitives.prim_holder.prim_table[code] s_frame = MockFrame([rcvr] + list(args)).as_context_get_shadow(space) @@ -52,7 +51,7 @@ try: w_selector = space.get_special_selector(selector) except Exception: - w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space)._shadow) + w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space).shadow) interp.trace=trace for i, v in enumerate(candidates): diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -407,7 +407,7 @@ w_o = space.wrap_list([1, 2, 3]) w_methoddict = w_o.shadow_of_my_class(space)._s_superclass._s_superclass.w_methoddict() w_methoddict.as_methoddict_get_shadow(space).sync_cache() - selectors_w = w_methoddict._shadow.methoddict.keys() + selectors_w = w_methoddict.shadow.methoddict.keys() w_sel = None for sel in selectors_w: if sel.as_string() == 'size': diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -8,7 +8,7 @@ # Heuristic to detect if this is a metaclass. Don't use apart # from in this test file, because classtable['w_Metaclass'] is # bogus after loading an image. 
- return w_cls.s_class is space.classtable['w_Metaclass']._shadow + return w_cls.s_class is space.classtable['w_Metaclass'].shadow def test_every_class_is_an_instance_of_a_metaclass(): for (nm, w_cls) in space.classtable.items(): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -23,8 +23,8 @@ self.s_class = space.w_MethodContext.as_class_get_shadow(space) def as_blockcontext_get_shadow(self): - self._shadow = shadow.BlockContextShadow(space, self) - return self._shadow + self.shadow = shadow.BlockContextShadow(space, self) + return self.shadow def wrap(x): if isinstance(x, int): return space.wrap_int(x) @@ -806,13 +806,13 @@ interp.image = Image() try: - monkeypatch.setattr(w_frame._shadow, "_sendSelfSelector", perform_mock) + monkeypatch.setattr(w_frame.shadow, "_sendSelfSelector", perform_mock) monkeypatch.setattr(bitblt.BitBltShadow, "sync_cache", sync_cache_mock) with py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: monkeypatch.undo() - assert w_frame._shadow.pop() is mock_bitblt # the receiver + assert w_frame.shadow.pop() is mock_bitblt # the receiver # Note: # primitives.NEXT is unimplemented as it is a performance optimization diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -171,11 +171,11 @@ w_object = blockcontext(pc=13) old_vars = w_object._vars s_object = w_object.as_blockcontext_get_shadow(space) - s_object._shadow = None + s_object.shadow = None s_newobject = w_object.as_blockcontext_get_shadow(space) assert ([s_newobject.fetch(i) for i in range(s_newobject.size())] == [s_object.fetch(i) for i in range(s_newobject.size())]) - assert w_object._shadow is s_newobject + assert w_object.shadow is s_newobject def test_compiledmethodshadow(): from test_model import 
joinbits @@ -222,7 +222,7 @@ assert o.notified assert w_o.fetch(space, 0) == 1 try: - w_o._shadow.notify(Observer()) + w_o.shadow.notify(Observer()) except RuntimeError: pass else: diff --git a/spyvm/version.py b/spyvm/version.py new file mode 100644 --- /dev/null +++ b/spyvm/version.py @@ -0,0 +1,40 @@ +from rpython.rlib import jit + +# This declares the decorated function as "pure" while the self-object +# has an unchanged version. Neither self nor self.version are promoted to constants. +def elidable_for_version(func): + @jit.elidable + def elidable_func(self, version, *args): + return func(self, *args) + def meth(self, *args): + return elidable_func(self, self.version, *args) + elidable_func.func_name = "elidable_" + func.func_name + meth.func_name = "elidable_meth_" + func.func_name + return meth + +# In addition to marking the decorated function as "pure", both the receiver +# and the version of the receiver are promoted to constants. This should only +# be used in situations where the receiver is very unlikely to change in the same +# context of the interpreted program (like classes or compiled methods). +def constant_for_version(func): + @jit.elidable + def elidable_func(self, version, *args): + return func(self, *args) + def meth(self, *args): + self = jit.promote(self) + version = jit.promote(self.version) + return elidable_func(self, version, *args) + return meth + +class Version(object): + pass + +class VersionMixin(object): + # Concrete class must define a pseudo immutable field like the following: + # _attrs_ = ['version'] + # _immutable_fields_ = ['version?'] + + version = Version() + + def changed(self): + self.version = Version() From noreply at buildbot.pypy.org Fri May 2 12:52:22 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Fri, 2 May 2014 12:52:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk 64bit-c2: STM GC has no support for destructors atm. 
Message-ID: <20140502105222.6B66C1C00B9@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: 64bit-c2 Changeset: r797:0e0c70ccb883 Date: 2014-05-02 12:44 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0e0c70ccb883/ Log: STM GC has no support for destructors atm. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1076,7 +1076,8 @@ return self._real_depth_buffer def __del__(self): - lltype.free(self._real_depth_buffer, flavor='raw') + pass + #lltype.free(self._real_depth_buffer, flavor='raw') class W_16BitDisplayBitmap(W_DisplayBitmap): From noreply at buildbot.pypy.org Fri May 2 13:33:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 13:33:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some text Message-ID: <20140502113327.4F79C1C0A66@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5220:c40d093dd7ec Date: 2014-05-02 13:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/c40d093dd7ec/ Log: add some text diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -390,13 +390,18 @@ simple memory model (sequential consistency) and a composable way to synchronise memory accesses using atomic blocks. -Unfortunately, STM has a big performance problem. One way to approach -this problem is to make STM systems that make better use of low-level -features in existing OS kernels. -We are currently working on a STM system that makes use of -several such features like virtual memory and memory segmentation. -We further tailor the system to the discussed use case which gives us -an advantage over other STM systems that are more general. With this +Unfortunately, STM has a big performance problem. Particularly, for +our use case there is not much static information available since we +are executing a program only known at runtime. 
Additionally, replacing +the GIL means running everything in transactions, so there is not much +code that can run outside and be optimized better. + +One way to get more performance is to make STM systems that make +better use of low-level features in existing OS kernels. We are +currently working on a STM system that makes use of several such +features like virtual memory and memory segmentation. We further +tailor the system to the discussed use case which gives us an +advantage over other STM systems that are more general. With this approach, initial results suggest that we can keep the overhead of STM below 50\%. A hybrid TM system, which also uses HTM to accelerate certain tasks, looks like a very promising direction of research From noreply at buildbot.pypy.org Fri May 2 13:46:46 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 13:46:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: minor changes Message-ID: <20140502114646.8D4141C088E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5221:b88e3053f8a9 Date: 2014-05-02 13:46 +0200 http://bitbucket.org/pypy/extradoc/changeset/b88e3053f8a9/ Log: minor changes diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -393,10 +393,11 @@ Unfortunately, STM has a big performance problem. Particularly, for our use case there is not much static information available since we are executing a program only known at runtime. Additionally, replacing -the GIL means running everything in transactions, so there is not much -code that can run outside and be optimized better. +the GIL means running every part of the application in transactions, +so there is not much code that can run outside and that can be +optimized better. The performance of the TM system is vital. 
-One way to get more performance is to make STM systems that make +One way to get more performance is to develop STM systems that make better use of low-level features in existing OS kernels. We are currently working on a STM system that makes use of several such features like virtual memory and memory segmentation. We further @@ -405,8 +406,8 @@ approach, initial results suggest that we can keep the overhead of STM below 50\%. A hybrid TM system, which also uses HTM to accelerate certain tasks, looks like a very promising direction of research -too. In general we believe that further work to reduce the overhead of -STM is very worthwhile. +too. We believe that further work to reduce the overhead of STM is +very worthwhile. From noreply at buildbot.pypy.org Fri May 2 13:53:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 13:53:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: additional cite Message-ID: <20140502115327.1EBA61C244E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5222:5d9030322a0b Date: 2014-05-02 13:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/5d9030322a0b/ Log: additional cite diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -322,9 +322,9 @@ systems often scale very well to a big number of threads and eventually overtake the single-threaded execution, they often provide no benefits at all for low numbers of threads (1-8). There are some -attempts \cite{warmhoff13} that can reduce the overhead a lot, but -also scale very badly so that their benefit on more than one thread is -little. +attempts \cite{warmhoff13,spear09} that can reduce the overhead a lot, +but scale badly or only for certain workloads. Often the benefits +on more than one thread are too little in real world applications. 
However, STM compared to HTM does not suffer from the same restricting limitations. Transactions can be arbitrarily long. This makes it @@ -481,6 +481,10 @@ symposium on Principles of programming languages (POPL '06)}. ACM, New York, NY, USA +\bibitem{spear09} + Spear, Michael F., et al. "Transactional mutex locks." \emph{SIGPLAN + Workshop on Transactional Computing.} 2009. + \end{thebibliography} From noreply at buildbot.pypy.org Fri May 2 14:45:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 14:45:17 +0200 (CEST) Subject: [pypy-commit] pypy default: fixup (arigato) Message-ID: <20140502124517.9810E1C00B9@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71195:67fb13391dc6 Date: 2014-05-02 12:45 +0000 http://bitbucket.org/pypy/pypy/changeset/67fb13391dc6/ Log: fixup (arigato) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -18,17 +18,18 @@ http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy -project. We showed quite a bit of progress on all three projects (see below) -and we're slowly running out of funds. -Please consider donating more so we can finish those projects! The three -projects are: +project, and for those who donate to our three sub-projects. +We showed quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: * `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. * `STM`_ (software transactional memory): a preview will be released very soon, once we fix a few bugs -* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ which is included in the PyPy 2.3 release. More details below. .. _`Py3k`: http://pypy.org/py3donate.html .. 
_`STM`: http://pypy.org/tmdonate2.html @@ -44,8 +45,8 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; -note that the latest cpython is not faster than cpython 2.7.2) +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) due to its integrated tracing JIT compiler. This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, @@ -56,13 +57,13 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org .. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -Bugfixes +Bugfixes -------- Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider @@ -71,7 +72,7 @@ * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. -* Generators are faster since they now skip unecessary cleanup +* Generators are faster since they now skip unnecessary cleanup * A first time contributor simplified JIT traces by adding integer bound propagation in indexing and logical operations. @@ -84,6 +85,8 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Support for corner cases on objects with __int__ and __float__ methods + .. 
_`HippyVM`: http://www.hippyvm.com New Platforms and Features @@ -97,8 +100,6 @@ * Support for precompiled headers in the build process for MSVC -* Support for objects with __int__ and __float__ methods - * Tweak support of errno in cpyext (the PyPy implemenation of the capi) @@ -127,8 +128,12 @@ * A cffi-based ``numpy.random`` module is available as a branch in the numpy repository, it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load + memory operations used in numpy arrays. Further work remains here in virtualizing the + alloc_raw_storage when possible. This will allow scalars to have storages but still be + virtualized when possible in loops. Cheers + The PyPy Team From noreply at buildbot.pypy.org Fri May 2 15:30:11 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 2 May 2014 15:30:11 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add citation Message-ID: <20140502133011.498D81C01CB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5223:bd5dc3629fa4 Date: 2014-05-02 15:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/bd5dc3629fa4/ Log: add citation diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -13,7 +13,6 @@ \usepackage{hyperref} \usepackage{amsmath} - \begin{document} \special{papersize=8.5in,11in} @@ -132,19 +131,19 @@ \subsection{Why is there a GIL?} The GIL is a very simple synchronisation mechanism for supporting -multithreading in the interpreter. 
The basic guarantee is that the -GIL may only be released in-between bytecode instructions. The -interpreter can thus rely on complete isolation and atomicity of these +multithreading in the interpreter. The basic guarantee is that the GIL +may only be released in-between bytecode instructions. The interpreter +can thus rely on complete isolation and atomicity of these instructions. Additionally, it provides the application with a -sequential consistency model. As a consequence, applications can rely -on certain operations to be atomic and that they will always be -executed in the order in which they appear in the code. While -depending on this may not always be a good idea, it is done in -practice. A GIL-replacement should therefore uphold these +sequential consistency model\cite{lamport79}. As a consequence, +applications can rely on certain operations to be atomic and that they +will always be executed in the order in which they appear in the +code. While depending on this may not always be a good idea, it is +done in practice. A GIL-replacement should therefore uphold these guarantees, while preferably also be as easily implementable as a GIL for the interpreter. -[xxx mention that the interpreter is typically very large and maintained -by open-source communities] +[xxx mention that the interpreter is typically + very large and maintained by open-source communities] The GIL also allows for easy integration with external C libraries that may not be thread-safe. For the duration of the calls, we @@ -350,9 +349,10 @@ %% - unlimited transaction length (easy atomic blocks) + \section{The Way Forward} -\begin{table*}[!ht] +\begin{table*}[h] \centering \begin{tabular}{|l|c|c|c|c|c|} \hline @@ -422,9 +422,9 @@ %% This is the text of the appendix, if you need one. -\acks +%% \acks -Acknowledgements... +%% Acknowledgements... % We recommend abbrvnat bibliography style. @@ -485,6 +485,12 @@ Spear, Michael F., et al. "Transactional mutex locks." 
\emph{SIGPLAN Workshop on Transactional Computing.} 2009. +\bibitem{lamport79} + Lamport, Leslie. "How to make a multiprocessor computer that + correctly executes multiprocess programs." \emph{Computers, IEEE + Transactions} on 100.9 (1979): 690-691. + + \end{thebibliography} From noreply at buildbot.pypy.org Fri May 2 17:04:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 17:04:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Bah, can't pass anything via r11 across the call --- the CALL instruction itself may need r11 Message-ID: <20140502150450.E073F1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71196:34b987aa50c1 Date: 2014-05-02 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/34b987aa50c1/ Log: Bah, can't pass anything via r11 across the call --- the CALL instruction itself may need r11 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -395,10 +395,6 @@ mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: - if self.cpu.gc_ll_descr.stm: - assert IS_X86_64 - mc.PUSH_r(X86_64_SCRATCH_REG.value) - # self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) # if self.cpu.gc_ll_descr.stm: @@ -407,7 +403,7 @@ # current 'stm_location' so that it is found. The easiest # is to simply push it on the shadowstack, from its source # location as two extra arguments on the machine stack - # (at this point containing: [ref][retaddr][num][obj]...) + # (at this point containing: [retaddr][ref][num][obj]...) # XXX this should also be done if 'for_frame' is true... 
mc.MOV(esi, self.heap_shadowstack_top()) mc.MOV_rs(edi.value, 2 * WORD) # [num] @@ -416,9 +412,8 @@ mc.LEA_ra(edi.value, (self.SEGMENT_NO, rx86.NO_BASE_REGISTER, edi.value, 1, +1)) mc.MOV_mr((self.SEGMENT_NO, esi.value, 0), edi.value) - mc.MOV_rs(edi.value, 0 * WORD) # [ref] + mc.MOV_rs(edi.value, 1 * WORD) # [ref] mc.MOV_mr((self.SEGMENT_NO, esi.value, WORD), edi.value) - mc.MOV_sr(0 * WORD, esi.value) # save org shadowstack_top mc.LEA_rm(esi.value, (self.SEGMENT_NO, esi.value, 2 * WORD)) mc.MOV(self.heap_shadowstack_top(), esi) mc.MOV_rs(edi.value, 3 * WORD) # [obj] @@ -466,13 +461,17 @@ # if not for_frame: + if self.cpu.gc_ll_descr.stm: + # SUB touches CPU flags + mc.MOV(esi, self.heap_shadowstack_top()) + mc.LEA_rm(esi.value, (self.SEGMENT_NO, esi.value, -2 * WORD)) + mc.MOV(self.heap_shadowstack_top(), esi) if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) if self.cpu.gc_ll_descr.stm: - mc.POP(self.heap_shadowstack_top()) - mc.RET16_i(2 * WORD) + mc.RET16_i(3 * WORD) else: mc.RET16_i(WORD) else: @@ -2269,7 +2268,7 @@ num, ref = extract_raw_stm_location( self._regalloc.stm_location) mc.PUSH(imm(rffi.cast(lltype.Signed, num))) - mc.MOV(X86_64_SCRATCH_REG, imm(rffi.cast(lltype.Signed, ref))) + mc.PUSH(imm(rffi.cast(lltype.Signed, ref))) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address mc.CALL(imm(self.wb_slowpath[helper_num])) From noreply at buildbot.pypy.org Fri May 2 17:10:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 17:10:14 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into branch Message-ID: <20140502151014.0FFA51C003C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71197:119b5f4c257f Date: 2014-05-02 15:00 +0300 http://bitbucket.org/pypy/pypy/changeset/119b5f4c257f/ Log: merge default into branch diff --git a/pypy/module/_lsprof/test/test_cprofile.py 
b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.__code__: + if entry.code == f1.func_code: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.__code__] - ebar = entries[bar.__code__] + efoo = entries[foo.func_code] + ebar = entries[bar.func_code] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print('NOT FOUND: %s' % pattern.rstrip('\n')) - print('--- GOT ---') - print(got) - print() - print('--- EXPECTED ---') - print(expected) + print 'NOT FOUND:', pattern.rstrip('\n') + print '--- GOT ---' + print got + print + print '--- EXPECTED ---' + print expected assert False assert not lines finally: diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestCProfile(BaseTestPyPyC): @@ -26,6 +26,10 @@ for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - assert ' call(' not in repr(loop.ops_by_id(method)) + # on 32-bit, there is f1=read_timestamp(); ...; + # f2=read_timestamp(); f3=call(llong_sub,f1,f2) + # which should turn into a single PADDQ/PSUBQ + if sys.maxint != 2147483647: + assert ' call(' not in repr(loop.ops_by_id(method)) assert ' call_may_force(' not in repr(loop.ops_by_id(method)) assert ' cond_call(' in repr(loop.ops_by_id(method)) From noreply at buildbot.pypy.org Fri May 2 
17:10:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 17:10:15 +0200 (CEST) Subject: [pypy-commit] pypy default: skip tests on 32 bit Message-ID: <20140502151015.305AC1C003C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71198:c03087ca1843 Date: 2014-05-02 18:07 +0300 http://bitbucket.org/pypy/pypy/changeset/c03087ca1843/ Log: skip tests on 32 bit diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,6 +7,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") + if sys.maxsize < 2 ** 31: + py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -30,7 +32,7 @@ def test02_instance_data_read_access(self): """Test read access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -117,7 +119,7 @@ def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -489,14 +491,14 @@ import cppyy four_vector = cppyy.gbl.four_vector - + t1 = four_vector(1., 2., 3., -4.) t2 = four_vector(0., 0., 0., 0.) 
t3 = four_vector(t1) - + assert t1 == t3 assert t1 != t2 - + for i in range(4): assert t1[i] == t3[i] @@ -625,8 +627,8 @@ def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" - - import cppyy + + import cppyy gbl = cppyy.gbl c1 = cppyy.bind_object(0, gbl.cppyy_test_data) @@ -662,11 +664,11 @@ def test19_object_validity(self): """Test object validity checking""" - + from cppyy import gbl d = gbl.cppyy_test_pod() - + assert d assert not not d From noreply at buildbot.pypy.org Fri May 2 17:10:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 2 May 2014 17:10:16 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into branch Message-ID: <20140502151016.595951C003C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71199:6c9fdc71640b Date: 2014-05-02 18:09 +0300 http://bitbucket.org/pypy/pypy/changeset/6c9fdc71640b/ Log: merge default into branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -18,17 +18,18 @@ http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy -project. We showed quite a bit of progress on all three projects (see below) -and we're slowly running out of funds. -Please consider donating more so we can finish those projects! The three -projects are: +project, and for those who donate to our three sub-projects. +We showed quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: * `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. * `STM`_ (software transactional memory): a preview will be released very soon, once we fix a few bugs -* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. 
+* `NumPy`_ which is included in the PyPy 2.3 release. More details below. .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html @@ -44,8 +45,8 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; -note that the latest cpython is not faster than cpython 2.7.2) +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) due to its integrated tracing JIT compiler. This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, @@ -56,13 +57,13 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org .. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -Bugfixes +Bugfixes -------- Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider @@ -71,7 +72,7 @@ * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. -* Generators are faster since they now skip unecessary cleanup +* Generators are faster since they now skip unnecessary cleanup * A first time contributor simplified JIT traces by adding integer bound propagation in indexing and logical operations. @@ -84,6 +85,8 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Support for corner cases on objects with __int__ and __float__ methods + .. 
_`HippyVM`: http://www.hippyvm.com New Platforms and Features @@ -97,8 +100,6 @@ * Support for precompiled headers in the build process for MSVC -* Support for objects with __int__ and __float__ methods - * Tweak support of errno in cpyext (the PyPy implemenation of the capi) @@ -127,8 +128,12 @@ * A cffi-based ``numpy.random`` module is available as a branch in the numpy repository, it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load + memory operations used in numpy arrays. Further work remains here in virtualizing the + alloc_raw_storage when possible. This will allow scalars to have storages but still be + virtualized when possible in loops. 
Cheers + The PyPy Team diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,6 +7,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") + if sys.maxsize < 2 ** 31: + py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -30,7 +32,7 @@ def test02_instance_data_read_access(self): """Test read access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -117,7 +119,7 @@ def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -489,14 +491,14 @@ import cppyy four_vector = cppyy.gbl.four_vector - + t1 = four_vector(1., 2., 3., -4.) t2 = four_vector(0., 0., 0., 0.) 
t3 = four_vector(t1) - + assert t1 == t3 assert t1 != t2 - + for i in range(4): assert t1[i] == t3[i] @@ -625,8 +627,8 @@ def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" - - import cppyy + + import cppyy gbl = cppyy.gbl c1 = cppyy.bind_object(0, gbl.cppyy_test_data) @@ -662,11 +664,11 @@ def test19_object_validity(self): """Test object validity checking""" - + from cppyy import gbl d = gbl.cppyy_test_pod() - + assert d assert not not d From noreply at buildbot.pypy.org Fri May 2 17:46:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 17:46:50 +0200 (CEST) Subject: [pypy-commit] stmgc marker: More symmetrically, put also in the "other" Message-ID: <20140502154650.C6ABB1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1194:937201ff1335 Date: 2014-05-02 17:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/937201ff1335/ Log: More symmetrically, put also in the "other" field when reporting a contention where "self" writes and "other" reads. diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -165,32 +165,27 @@ /* For some categories, we can also collect the relevant information for the other segment. */ + char *outmarker = abort_other ? other_pseg->marker_self + : my_pseg->marker_other; switch (kind) { case WRITE_WRITE_CONTENTION: marker_fetch_obj_write(other_segment_num, obj, other_marker); + marker_expand(other_marker, other_segment_base, outmarker); break; case INEVITABLE_CONTENTION: assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; + marker_expand(other_marker, other_segment_base, outmarker); break; + case WRITE_READ_CONTENTION: + strcpy(outmarker, ""); + break; default: - other_marker[0] = 0; - other_marker[1] = 0; + outmarker[0] = 0; break; } - marker_expand(other_marker, other_segment_base, - abort_other ? 
other_pseg->marker_self - : my_pseg->marker_other); - - if (abort_other && other_pseg->marker_self[0] == 0) { - if (kind == WRITE_READ_CONTENTION) - strcpy(other_pseg->marker_self, ""); - else - strcpy(other_pseg->marker_self, ""); - } - release_marker_lock(other_segment_base); } diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -260,7 +260,8 @@ tl = self.get_stm_thread_local() assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ assert ffi.string(tl.longest_marker_self) == '19' - assert ffi.string(tl.longest_marker_other) == '' + assert ffi.string(tl.longest_marker_other) == ( + '') def test_double_remote_markers_cb_write_write(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") From noreply at buildbot.pypy.org Fri May 2 18:31:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 2 May 2014 18:31:38 +0200 (CEST) Subject: [pypy-commit] pypy default: readd less controversial py3k compat Message-ID: <20140502163138.EA67A1C003C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71200:2b5234972cd9 Date: 2014-05-02 09:30 -0700 http://bitbucket.org/pypy/pypy/changeset/2b5234972cd9/ Log: readd less controversial py3k compat diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time 
spent doing the call to time.time() From noreply at buildbot.pypy.org Fri May 2 18:33:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 18:33:41 +0200 (CEST) Subject: [pypy-commit] pypy default: redo test_cprofile py3k compat so it works the same Message-ID: <20140502163341.CBBCD1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71201:e89381f9b054 Date: 2014-05-02 12:32 -0400 http://bitbucket.org/pypy/pypy/changeset/e89381f9b054/ Log: redo test_cprofile py3k compat so it works the same diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print('') + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: From noreply at buildbot.pypy.org Fri May 2 18:33:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 18:33:42 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140502163342.E0B251C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71202:b635f1afc5ce Date: 
2014-05-02 12:33 -0400 http://bitbucket.org/pypy/pypy/changeset/b635f1afc5ce/ Log: merge heads From noreply at buildbot.pypy.org Fri May 2 19:50:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 19:50:35 +0200 (CEST) Subject: [pypy-commit] pypy default: support __future__ flags in gateway appdef from func obj Message-ID: <20140502175035.E43401C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71203:b72c31dbc037 Date: 2014-05-02 13:35 -0400 http://bitbucket.org/pypy/pypy/changeset/b72c31dbc037/ Log: support __future__ flags in gateway appdef from func obj diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1074,7 +1074,9 @@ return x+y ''') """ + prefix = "" if not isinstance(source, str): + flags = source.__code__.co_flags source = py.std.inspect.getsource(source).lstrip() while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function @@ -1083,12 +1085,21 @@ source = source[source.find('\n') + 1:].lstrip() assert source.startswith("def "), "can only transform functions" source = source[4:] + import __future__ + if flags & __future__.CO_FUTURE_DIVISION: + prefix += "from __future__ import division\n" + if flags & __future__.CO_FUTURE_ABSOLUTE_IMPORT: + prefix += "from __future__ import absolute_import\n" + if flags & __future__.CO_FUTURE_PRINT_FUNCTION: + prefix += "from __future__ import print_function\n" + if flags & __future__.CO_FUTURE_UNICODE_LITERALS: + prefix += "from __future__ import unicode_literals\n" p = source.find('(') assert p >= 0 funcname = source[:p].strip() source = source[p:] assert source.strip() - funcsource = "def %s%s\n" % (funcname, source) + funcsource = prefix + "def %s%s\n" % (funcname, source) #for debugging of wrong source code: py.std.parser.suite(funcsource) a = applevel(funcsource, filename=filename) return a.interphook(funcname) diff --git 
a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -1,18 +1,20 @@ - # -*- coding: utf-8 -*- +from __future__ import division, print_function # for test_app2interp_future from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature import py import sys + class FakeFunc(object): def __init__(self, space, name): self.space = space self.name = name self.defs_w = [] + class TestBuiltinCode: def test_signature(self): def c(space, w_x, w_y, hello_w): @@ -89,8 +91,8 @@ w_result = code.funcrun(FakeFunc(self.space, "c"), args) assert self.space.eq_w(w_result, w(1020)) + class TestGateway: - def test_app2interp(self): w = self.space.wrap def app_g3(a, b): @@ -117,6 +119,14 @@ args = gateway.Arguments(self.space, [w(6)], ['hello', 'world'], [w(7), w(8)]) assert self.space.int_w(gg(self.space, w(3), args)) == 213 + def test_app2interp_future(self): + w = self.space.wrap + def app_g3(a, b): + print(end='') + return a / b + g3 = gateway.app2interp_temp(app_g3) + assert self.space.eq_w(g3(self.space, w(1), w(4),), w(0.25)) + def test_interp2app(self): space = self.space w = space.wrap @@ -616,7 +626,7 @@ w_app_f = self.space.wrap(app_f) assert isinstance(w_app_f.code, gateway.BuiltinCode2) - + called = [] fastcall_2 = w_app_f.code.fastcall_2 def witness_fastcall_2(space, w_func, w_a, w_b): @@ -736,7 +746,6 @@ class TestPassThroughArguments: - def test_pass_trough_arguments0(self): space = self.space @@ -834,7 +843,6 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): def __init__(self, **kw): @@ -873,4 +881,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - From noreply at buildbot.pypy.org Fri May 2 21:39:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 21:39:11 +0200 (CEST) Subject: 
[pypy-commit] pypy fix-tpname: fix cpyext slot wrapper repr Message-ID: <20140502193911.B35E91C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71205:f1afb02b246a Date: 2014-05-02 15:31 -0400 http://bitbucket.org/pypy/pypy/changeset/f1afb02b246a/ Log: fix cpyext slot wrapper repr diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -174,7 +174,7 @@ def descr_method_repr(self): return self.space.wrap("" % (self.method_name, - self.w_objclass.getname(self.space))) + self.w_objclass.name)) def cwrapper_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCWrapperObject, w_self) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -33,7 +33,7 @@ assert "copy" in repr(module.fooType.copy) assert repr(module.fooType) == "" assert repr(obj2) == "" - assert repr(module.fooType.__call__) == "" + assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) print(obj.foo) From noreply at buildbot.pypy.org Fri May 2 21:39:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 21:39:10 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix tp_name of cpyext typeobjects Message-ID: <20140502193910.78F621C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71204:2af214ac9f7e Date: 2014-05-02 15:31 -0400 http://bitbucket.org/pypy/pypy/changeset/2af214ac9f7e/ Log: fix tp_name of cpyext typeobjects diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -291,14 +291,9 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, self) convert_member_defs(space, dict_w, pto.c_tp_members, self) 
- full_name = rffi.charp2str(pto.c_tp_name) - if '.' in full_name: - module_name, extension_name = rsplit(full_name, ".", 1) - dict_w["__module__"] = space.wrap(module_name) - else: - extension_name = full_name + name = rffi.charp2str(pto.c_tp_name) - W_TypeObject.__init__(self, space, extension_name, + W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w) if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True From noreply at buildbot.pypy.org Fri May 2 22:55:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 May 2014 22:55:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a suggestion Message-ID: <20140502205509.9BDB81C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5224:5fdf06cfaac9 Date: 2014-05-02 22:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/5fdf06cfaac9/ Log: Add a suggestion diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -343,6 +343,10 @@ a whole program analysis since locks are inherently non-composable. The effectiveness of these approaches still has to be proven for our use case. +[xxx or maybe: "The effectiveness of these approaches is doubtful in our +use case --- for example, it makes it close to impossible to order the +locks consistently or to know in advance which locks a transaction will +need.] 
%% - overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) %% (FastLane: low overhead, not much gain)\\ From noreply at buildbot.pypy.org Fri May 2 23:20:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 2 May 2014 23:20:19 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: fix translation Message-ID: <20140502212019.720A51C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71206:af335604eacc Date: 2014-05-02 17:19 -0400 http://bitbucket.org/pypy/pypy/changeset/af335604eacc/ Log: fix translation diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -6,6 +6,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, interp_attrproperty, interp_attrproperty_w) +from pypy.objspace.std.typeobject import W_TypeObject from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, @@ -158,7 +159,9 @@ self.doc = doc self.func = func pyo = rffi.cast(PyObject, pto) - self.w_objclass = from_ref(space, pyo) + w_type = from_ref(space, pyo) + assert isinstance(w_type, W_TypeObject) + self.w_objclass = w_type def call(self, space, w_self, w_args, w_kw): if self.wrapper_func is None: diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -513,7 +513,7 @@ from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: - pto.c_tp_name = rffi.str2charp(w_type.getname(space)) + pto.c_tp_name = rffi.str2charp(w_type.name) pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: From noreply at 
buildbot.pypy.org Fri May 2 23:37:40 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 2 May 2014 23:37:40 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch; re-enable test_datatypes.py on 32b for testing Message-ID: <20140502213740.E58E21C003C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71207:c31d7ee65574 Date: 2014-05-02 13:46 -0700 http://bitbucket.org/pypy/pypy/changeset/c31d7ee65574/ Log: merge default into branch; re-enable test_datatypes.py on 32b for testing diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -18,17 +18,18 @@ http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy -project. We showed quite a bit of progress on all three projects (see below) -and we're slowly running out of funds. -Please consider donating more so we can finish those projects! The three -projects are: +project, and for those who donate to our three sub-projects. +We showed quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: * `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. * `STM`_ (software transactional memory): a preview will be released very soon, once we fix a few bugs -* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ which is included in the PyPy 2.3 release. More details below. .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html @@ -44,8 +45,8 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; -note that the latest cpython is not faster than cpython 2.7.2) +CPython 2.7. 
It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) due to its integrated tracing JIT compiler. This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, @@ -56,13 +57,13 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org .. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -Bugfixes +Bugfixes -------- Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider @@ -71,7 +72,7 @@ * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. -* Generators are faster since they now skip unecessary cleanup +* Generators are faster since they now skip unnecessary cleanup * A first time contributor simplified JIT traces by adding integer bound propagation in indexing and logical operations. @@ -84,6 +85,8 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Support for corner cases on objects with __int__ and __float__ methods + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features @@ -97,8 +100,6 @@ * Support for precompiled headers in the build process for MSVC -* Support for objects with __int__ and __float__ methods - * Tweak support of errno in cpyext (the PyPy implemenation of the capi) @@ -127,8 +128,12 @@ * A cffi-based ``numpy.random`` module is available as a branch in the numpy repository, it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. 
This will allow scalars to have storages but still be virtualized when possible in loops. +* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load + memory operations used in numpy arrays. Further work remains here in virtualizing the + alloc_raw_storage when possible. This will allow scalars to have storages but still be + virtualized when possible in loops. Cheers + The PyPy Team diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -161,3 +161,7 @@ .. branch: refactor-buffer-api Properly implement old/new buffer API for objects and start work on replacing bufferstr usage + +.. branch: issue1430 +Add a lock for unsafe calls to gethostbyname and gethostbyaddr + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0524dae88c75 +.. startrev: 0f75ad4d14ce - diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -692,23 +692,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. # In this special situation we should just not lock at all # (translation is not multithreaded anyway). 
if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1415,10 +1409,10 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - name = "None" + e = oefmt(self.w_TypeError, "must be %s, not None", expected) else: - name = self.type(w_obj).get_module_type_name() - raise oefmt(self.w_TypeError, "must be %s, not %s", expected, name) + e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + raise e @specialize.arg(1) def getarg_w(self, code, w_obj): @@ -1722,24 +1716,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -362,9 +362,9 @@ value = getattr(self, attr) if fmt == 'R': result = space.str_w(space.repr(value)) - elif fmt in 'NT': - if fmt == 'T': - value = space.type(value) + elif fmt == 'T': + result = space.type(value).get_module_type_name() + elif fmt == 'N': result = value.getname(space) else: result = str(value) @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).getname(space) + %T - The result of space.type(w_arg).get_module_type_name() """ if not len(args): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1074,7 +1074,9 @@ return x+y ''') """ + prefix = "" if not isinstance(source, str): + flags = source.__code__.co_flags source = py.std.inspect.getsource(source).lstrip() while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function @@ -1083,12 +1085,21 @@ source = source[source.find('\n') + 1:].lstrip() assert source.startswith("def "), "can only transform functions" source = source[4:] + import __future__ + if flags & __future__.CO_FUTURE_DIVISION: + prefix += "from __future__ import division\n" + if flags & __future__.CO_FUTURE_ABSOLUTE_IMPORT: + prefix += "from __future__ import absolute_import\n" + if flags & __future__.CO_FUTURE_PRINT_FUNCTION: + prefix += "from __future__ import print_function\n" + if flags & __future__.CO_FUTURE_UNICODE_LITERALS: + prefix += "from __future__ import unicode_literals\n" p = source.find('(') assert p >= 0 funcname = source[:p].strip() source = source[p:] assert source.strip() - funcsource = "def %s%s\n" % (funcname, source) + funcsource = prefix + "def %s%s\n" % (funcname, source) #for debugging of wrong source code: py.std.parser.suite(funcsource) a = 
applevel(funcsource, filename=filename) return a.interphook(funcname) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -1,18 +1,20 @@ - # -*- coding: utf-8 -*- +from __future__ import division, print_function # for test_app2interp_future from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature import py import sys + class FakeFunc(object): def __init__(self, space, name): self.space = space self.name = name self.defs_w = [] + class TestBuiltinCode: def test_signature(self): def c(space, w_x, w_y, hello_w): @@ -89,8 +91,8 @@ w_result = code.funcrun(FakeFunc(self.space, "c"), args) assert self.space.eq_w(w_result, w(1020)) + class TestGateway: - def test_app2interp(self): w = self.space.wrap def app_g3(a, b): @@ -117,6 +119,14 @@ args = gateway.Arguments(self.space, [w(6)], ['hello', 'world'], [w(7), w(8)]) assert self.space.int_w(gg(self.space, w(3), args)) == 213 + def test_app2interp_future(self): + w = self.space.wrap + def app_g3(a, b): + print(end='') + return a / b + g3 = gateway.app2interp_temp(app_g3) + assert self.space.eq_w(g3(self.space, w(1), w(4),), w(0.25)) + def test_interp2app(self): space = self.space w = space.wrap @@ -616,7 +626,7 @@ w_app_f = self.space.wrap(app_f) assert isinstance(w_app_f.code, gateway.BuiltinCode2) - + called = [] fastcall_2 = w_app_f.code.fastcall_2 def witness_fastcall_2(space, w_func, w_a, w_b): @@ -736,7 +746,6 @@ class TestPassThroughArguments: - def test_pass_trough_arguments0(self): space = self.space @@ -834,7 +843,6 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): def __init__(self, **kw): @@ -873,4 +881,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - diff --git a/pypy/module/_ast/test/test_ast.py 
b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -19,6 +19,11 @@ ast = self.ast assert isinstance(ast.__version__, str) + def test_flags(self): + skip("broken") + from copy_reg import _HEAPTYPE + assert self.ast.Module.__flags__ & _HEAPTYPE + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") @@ -218,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert exc.value.args[0] == "'Num' object has no attribute 'n'" + assert "Num' object has no attribute 'n'" in exc.value.args[0] x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert exc.value.args[0] == "'Num' object has no attribute 'lineno'" + assert "Num' object has no attribute 'lineno'" in exc.value.args[0] y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert exc.value.args[0] == "'Num' object has no attribute 'foobar'" + assert "Num' object has no attribute 'foobar'" in exc.value.args[0] x = ast.Num(lineno=2) assert x.lineno == 2 @@ -244,9 +249,8 @@ raises(TypeError, ast.Num, 1, 2, lineno=0) def test_issue1680_nonseq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -287,9 +291,8 @@ assert not hasattr(mod.body[0], 'name') def test_issue1680_seq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -392,9 +395,8 @@ import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ - assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Num_notfullinit(self): import ast import copy @@ -402,7 +404,7 @@ assert num_node.n == 2 assert num_node.lineno == 2 num_node2 = copy.deepcopy(num_node) - + def 
test_issue1673_Num_fullinit(self): import ast import copy @@ -413,7 +415,7 @@ assert num_node.col_offset == num_node2.col_offset dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Str(self): import ast import copy @@ -423,4 +425,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - \ No newline at end of file diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -59,7 +59,7 @@ self.tt, self.it, calls_repr)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsEntry.typedef = TypeDef( 'StatsEntry', @@ -86,7 +86,7 @@ frame_repr, self.callcount, self.reccallcount, self.tt, self.it)) def get_code(self, space): - return self.frame + return returns_code(space, self.frame) W_StatsSubEntry.typedef = TypeDef( 'SubStatsEntry', @@ -204,29 +204,67 @@ name = '?' if class_name is None: class_name = w_type.getname(space) # if the rest doesn't work - return "{method '%s' of '%s' objects}" % (name, class_name) + return "" % (name, class_name) def create_spec_for_function(space, w_func): + assert isinstance(w_func, Function) if w_func.w_module is not None: module = space.str_w(w_func.w_module) if module != '__builtin__': - return '{%s.%s}' % (module, w_func.name) - return '{%s}' % w_func.name + return '<%s.%s>' % (module, w_func.name) + return '<%s>' % w_func.name -def create_spec_for_object(space, w_obj): - class_name = space.type(w_obj).getname(space) - return "{'%s' object}" % (class_name,) +def create_spec_for_object(space, w_type): + class_name = w_type.getname(space) + return "<'%s' object>" % (class_name,) -def create_spec(space, w_arg): +class W_DelayedBuiltinStr(W_Root): + # This class should not be seen at app-level, but is useful to + # contain a (w_func, w_type) pair returned by prepare_spec(). 
+ # Turning this pair into a string cannot be done eagerly in + # an @elidable function because of space.str_w(), but it can + # be done lazily when we really want it. + + _immutable_fields_ = ['w_func', 'w_type'] + + def __init__(self, w_func, w_type): + self.w_func = w_func + self.w_type = w_type + self.w_string = None + + def wrap_string(self, space): + if self.w_string is None: + if self.w_type is None: + s = create_spec_for_function(space, self.w_func) + elif self.w_func is None: + s = create_spec_for_object(space, self.w_type) + else: + s = create_spec_for_method(space, self.w_func, self.w_type) + self.w_string = space.wrap(s) + return self.w_string + +W_DelayedBuiltinStr.typedef = TypeDef( + 'DelayedBuiltinStr', + __str__ = interp2app(W_DelayedBuiltinStr.wrap_string), +) + +def returns_code(space, w_frame): + if isinstance(w_frame, W_DelayedBuiltinStr): + return w_frame.wrap_string(space) + return w_frame # actually a PyCode object + + +def prepare_spec(space, w_arg): if isinstance(w_arg, Method): - return create_spec_for_method(space, w_arg.w_function, w_arg.w_class) + return (w_arg.w_function, w_arg.w_class) elif isinstance(w_arg, Function): - return create_spec_for_function(space, w_arg) + return (w_arg, None) else: - return create_spec_for_object(space, w_arg) + return (None, space.type(w_arg)) +prepare_spec._always_inline_ = True def lsprof_call(space, w_self, frame, event, w_arg): @@ -239,12 +277,10 @@ w_self._enter_return(code) elif event == 'c_call': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_call(key) + w_self._enter_builtin_call(w_arg) elif event == 'c_return' or event == 'c_exception': if w_self.builtins: - key = create_spec(space, w_arg) - w_self._enter_builtin_return(key) + w_self._enter_builtin_return(w_arg) else: # ignore or raise an exception??? 
pass @@ -307,13 +343,14 @@ return entry raise - @jit.elidable - def _get_or_make_builtin_entry(self, key, make=True): + @jit.elidable_promote() + def _get_or_make_builtin_entry(self, w_func, w_type, make): + key = (w_func, w_type) try: return self.builtin_data[key] except KeyError: if make: - entry = ProfilerEntry(self.space.wrap(key)) + entry = ProfilerEntry(W_DelayedBuiltinStr(w_func, w_type)) self.builtin_data[key] = entry return entry raise @@ -337,20 +374,18 @@ context._stop(self, entry) self.current_context = context.previous - def _enter_builtin_call(self, key): - self = jit.promote(self) - key = jit.promote_string(key) - entry = self._get_or_make_builtin_entry(key) + def _enter_builtin_call(self, w_arg): + w_func, w_type = prepare_spec(self.space, w_arg) + entry = self._get_or_make_builtin_entry(w_func, w_type, True) self.current_context = ProfilerContext(self, entry) - def _enter_builtin_return(self, key): + def _enter_builtin_return(self, w_arg): context = self.current_context if context is None: return - self = jit.promote(self) - key = jit.promote_string(key) + w_func, w_type = prepare_spec(self.space, w_arg) try: - entry = self._get_or_make_builtin_entry(key, False) + entry = self._get_or_make_builtin_entry(w_func, w_type, False) except KeyError: pass else: diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -11,6 +11,48 @@ import _lsprof assert repr(_lsprof.Profiler) == "" + def test_builtins(self): + import _lsprof + prof = _lsprof.Profiler() + lst = [] + prof.enable() + lst.append(len(lst)) + prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + "", + ) + for entry in stats: + assert entry.code in expected + + def test_builtins_callers(self): + import _lsprof + prof = _lsprof.Profiler(subcalls=True) + lst = [] + def f1(): + lst.append(len(lst)) + prof.enable(subcalls=True) + f1() + 
prof.disable() + stats = prof.getstats() + expected = ( + "", + "", + ) + by_id = set() + for entry in stats: + if entry.code == f1.__code__: + assert len(entry.calls) == 2 + for subentry in entry.calls: + assert subentry.code in expected + by_id.add(id(subentry.code)) + elif entry.code in expected: + by_id.add(id(entry.code)) + # :-( cProfile.py relies on the id() of the strings... + assert len(by_id) == len(expected) + def test_direct(self): import _lsprof def getticks(): @@ -37,10 +79,8 @@ stats = prof.getstats() entries = {} for entry in stats: - if not hasattr(entry.code, 'co_name'): - print entry.code - else: - entries[entry.code.co_name] = entry + assert hasattr(entry.code, 'co_name') + entries[entry.code.co_name] = entry efoo = entries['foo'] assert efoo.callcount == 2 assert efoo.reccallcount == 1 @@ -104,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -179,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print('') + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1084,27 +1084,27 @@ s = S(autofree=True) b = buffer(s) assert len(b) == 40 - b[4] = 'X' - b[:3] = 'ABC' - assert b[:6] == 'ABC\x00X\x00' + b[4] = b'X' + b[:3] = b'ABC' + assert b[:6] == b'ABC\x00X\x00' A = _rawffi.Array('c') a = A(10, autofree=True) - a[3] = 'x' + a[3] = 
b'x' b = buffer(a) assert len(b) == 10 - assert b[3] == 'x' - b[6] = 'y' - assert a[6] == 'y' - b[3:5] = 'zt' - assert a[3] == 'z' - assert a[4] == 't' + assert b[3] == b'x' + b[6] = b'y' + assert a[6] == b'y' + b[3:5] = b'zt' + assert a[3] == b'z' + assert a[4] == b't' b = memoryview(a) assert len(b) == 10 - assert b[3] == 'z' - b[3] = 'x' - assert b[3] == 'x' + assert b[3] == b'z' + b[3] = b'x' + assert b[3] == b'x' def test_union(self): import _rawffi diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -17,6 +17,8 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() + from pypy.module._socket.interp_func import State + space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -42,8 +42,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host) + res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -55,8 +56,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
""" + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host) + res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -310,3 +312,10 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) + +class State(object): + def __init__(self, space): + self.netdb_lock = None + + def startup(self, space): + self.netdb_lock = space.allocate_lock() diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -1,6 +1,7 @@ # NOT_RPYTHON # do not load cppyy here, see _init_pythonify() -import types, sys +import types +import sys # For now, keep namespaces and classes separate as namespaces are extensible @@ -11,7 +12,7 @@ def __getattr__(self, name): try: return get_pycppitem(self, name) # will cache on self - except Exception, e: + except Exception as e: raise AttributeError("%s object has no attribute '%s' (details: %s)" % (self, name, str(e))) @@ -302,7 +303,7 @@ return self._getitem__unchecked(idx) def python_style_sliceable_getitem(self, slice_or_idx): - if type(slice_or_idx) == types.SliceType: + if type(slice_or_idx) == slice: nseq = self.__class__() nseq += [python_style_getitem(self, i) \ for i in range(*slice_or_idx.indices(len(self)))] diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -30,7 +30,7 @@ def test02_instance_data_read_access(self): """Test read access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -117,7 +117,7 @@ def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" - import cppyy, sys + import 
cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -489,14 +489,14 @@ import cppyy four_vector = cppyy.gbl.four_vector - + t1 = four_vector(1., 2., 3., -4.) t2 = four_vector(0., 0., 0., 0.) t3 = four_vector(t1) - + assert t1 == t3 assert t1 != t2 - + for i in range(4): assert t1[i] == t3[i] @@ -625,8 +625,8 @@ def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" - - import cppyy + + import cppyy gbl = cppyy.gbl c1 = cppyy.bind_object(0, gbl.cppyy_test_data) @@ -662,11 +662,11 @@ def test19_object_validity(self): """Test object validity checking""" - + from cppyy import gbl d = gbl.cppyy_test_pod() - + assert d assert not not d diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'nditer.nditer', + 'nditer': 'nditer.W_NDIter', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -131,12 +131,13 @@ return dtype def get_name(self): - return self.w_box_type.name + name = self.w_box_type.name + if name.endswith('_'): + name = name[:-1] + return name def descr_get_name(self, space): name = self.get_name() - if name[-1] == '_': - name = name[:-1] if self.is_flexible() and self.elsize != 0: return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) @@ -819,7 +820,7 @@ w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { - NPY.BOOL: ['bool', 'bool8'], + NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], NPY.UBYTE: ['ubyte'], NPY.SHORT: ['short'], @@ -834,8 +835,8 @@ NPY.CFLOAT: ['csingle'], NPY.CDOUBLE: ['complex', 
'cfloat', 'cdouble'], NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], - NPY.STRING: ['string', 'str'], - NPY.UNICODE: ['unicode'], + NPY.STRING: ['string_', 'str'], + NPY.UNICODE: ['unicode_'], } self.alternate_constructors = { NPY.BOOL: [space.w_bool], diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -492,13 +492,15 @@ w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) -def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): +def descr__new__(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, + w_casting, w_op_axes, w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef( - 'nditer', +W_NDIter.typedef = TypeDef('nditer', + __module__ = 'numpy', + __new__ = interp2app(descr__new__), + __iter__ = interp2app(W_NDIter.descr_iter), __getitem__ = interp2app(W_NDIter.descr_getitem), __setitem__ = interp2app(W_NDIter.descr_setitem), @@ -530,3 +532,4 @@ shape = GetSetProperty(W_NDIter.descr_get_shape), value = GetSetProperty(W_NDIter.descr_get_value), ) +W_NDIter.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -47,6 +47,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d + assert dtype('bool_') is d assert dtype('|b1') is d b = '>' if sys.byteorder == 'little' else '<' assert dtype(b + 'i4') is not dtype(b + 'i4') @@ -63,10 +64,12 @@ assert dtype(int).names is None assert dtype(int).hasobject is False assert 
dtype(int).subdtype is None + assert dtype(str) is dtype('string') is dtype('string_') + assert dtype(unicode) is dtype('unicode') is dtype('unicode_') assert dtype(None) is dtype(float) - for d in [dtype('i4')]: + for d in [dtype('i4'), dtype('bool')]: for key in ["d[2]", "d['z']", "d[None]"]: exc = raises(KeyError, key) assert exc.value[0] == "There are no fields in dtype %s." % str(d) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -3,6 +3,19 @@ class AppTestNDIter(BaseNumpyAppTest): + def test_type(self): + import numpy as np + assert type(np.nditer) is type + assert np.nditer.__name__ == 'nditer' + assert np.nditer.__module__ == 'numpy' + try: + class Sub(np.nditer): + pass + except TypeError as e: + assert "not an acceptable base" in str(e) + else: + assert False + def test_basic(self): from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -0,0 +1,35 @@ +import py, sys +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + +class TestCProfile(BaseTestPyPyC): + + def test_cprofile_builtin(self): + def main(n): + import _lsprof + prof = _lsprof.Profiler() + i = 0 + lst = [] + prof.enable() + while i < n: + lst.append(i) # ID: append + lst.pop() # ID: pop + i += 1 + prof.disable() + return [(entry.code, entry.callcount) for entry in prof.getstats()] + # + log = self.run(main, [500]) + assert sorted(log.result) == [ + ("", 500), + ("", 1), + ("", 500), + ] + for method in ['append', 'pop']: + loop, = log.loops_by_id(method) + print loop.ops_by_id(method) + # on 32-bit, there is f1=read_timestamp(); ...; + # f2=read_timestamp(); f3=call(llong_sub,f1,f2) + # which should turn 
into a single PADDQ/PSUBQ + if sys.maxint != 2147483647: + assert ' call(' not in repr(loop.ops_by_id(method)) + assert ' call_may_force(' not in repr(loop.ops_by_id(method)) + assert ' cond_call(' in repr(loop.ops_by_id(method)) diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -10,6 +10,7 @@ mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) # force computation and record what we wrap module = mod.Module(space, W_Root()) + module.startup(space) for name in module.loaders: seeobj_w.append(module._load_lazily(space, name)) if hasattr(module, 'submodules'): diff --git a/pypy/objspace/std/test/test_strbufobject.py b/pypy/objspace/std/test/test_strbufobject.py --- a/pypy/objspace/std/test/test_strbufobject.py +++ b/pypy/objspace/std/test/test_strbufobject.py @@ -45,9 +45,9 @@ assert len(t) == 4 def test_buffer(self): - s = 'a'.__add__('b') - assert buffer(s) == buffer('ab') - assert memoryview(s) == 'ab' + s = b'a'.__add__(b'b') + assert buffer(s) == buffer(b'ab') + assert memoryview(s) == b'ab' def test_add_strbuf(self): # make three strbuf objects diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -60,7 +60,6 @@ class AppTestTypeObject: - def test_abstract_methods(self): class X(object): pass @@ -71,6 +70,13 @@ raises(AttributeError, getattr, type, "__abstractmethods__") raises(TypeError, "int.__abstractmethods__ = ('abc', )") + def test_attribute_error(self): + class X(object): + __module__ = 'test' + x = X() + exc = raises(AttributeError, "x.a") + assert str(exc.value) == "'X' object has no attribute 'a'" + def test_call_type(self): assert type(42) is int C = type('C', (object,), {'x': lambda: 42}) @@ -427,8 +433,7 @@ assert f.__call__() == ((), {}) assert f.__call__("hello", "world") == 
(("hello", "world"), {}) assert f.__call__(5, bla=6) == ((5,), {"bla": 6}) - assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, - "c": 3}) + assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, "c": 3}) def test_multipleinheritance_fail(self): try: @@ -539,7 +544,6 @@ assert ImmutableDoc.__doc__ == 'foo' def test_metaclass_conflict(self): - class T1(type): pass class T2(type): @@ -555,7 +559,7 @@ def test_metaclass_choice(self): events = [] - + class T1(type): def __new__(*args): events.append(args) @@ -577,7 +581,7 @@ assert type(D1) is T1 assert type(C) is T1 assert type(G) is T1 - + def test_descr_typecheck(self): raises(TypeError,type.__dict__['__name__'].__get__,1) raises(TypeError,type.__dict__['__mro__'].__get__,1) @@ -806,7 +810,7 @@ z2 = Z2() z2.__class__ = Z1 assert z2.__class__ == Z1 - + class I(int): pass class F(float): @@ -825,13 +829,12 @@ pass i = I() - i2 = I() i.__class__ = I2 i2.__class__ = I assert i.__class__ == I2 assert i2.__class__ == I - + i3 = I3() raises(TypeError, "i3.__class__ = I2") i3.__class__ = I4 @@ -882,6 +885,12 @@ Abc.__name__ = 'Def' assert Abc.__name__ == 'Def' raises(TypeError, "Abc.__name__ = 42") + try: + Abc.__name__ = 'G\x00hi' + except ValueError as e: + assert str(e) == "__name__ must not contain null bytes" + else: + assert False def test_compare(self): class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -489,11 +489,12 @@ def get_module_type_name(w_self): space = w_self.space - w_mod = w_self.get_module() - if space.isinstance_w(w_mod, space.w_str): - mod = space.str_w(w_mod) - if mod != '__builtin__': - return '%s.%s' % (mod, w_self.name) + if not w_self.is_heaptype(): + w_mod = w_self.get_module() + if space.isinstance_w(w_mod, space.w_str): + mod = space.str_w(w_mod) + if mod != '__builtin__': + return '%s.%s' % (mod, w_self.name) return w_self.name def getname(w_self, space): @@ 
-633,7 +634,10 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise oefmt(space.w_TypeError, "can't set %N.__name__", w_type) - w_type.name = space.str_w(w_value) + name = space.str_w(w_value) + if '\x00' in name: + raise oefmt(space.w_ValueError, "__name__ must not contain null bytes") + w_type.name = name def descr_get__mro__(space, w_type): w_type = _check(space, w_type) diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -266,14 +266,14 @@ # check that the result is really as expected if loopinvariant: if extraeffect != EffectInfo.EF_LOOPINVARIANT: - from rpython.jit.codewriter.policy import log; log.WARNING( + raise Exception( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" " effects): EF=%s" % (op, extraeffect)) if elidable: if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, EffectInfo.EF_ELIDABLE_CAN_RAISE): - from rpython.jit.codewriter.policy import log; log.WARNING( + raise Exception( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. 
it can have random" " effects): EF=%s" % (op, extraeffect)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,6 +347,21 @@ def forget_numberings(self, box): self.optimizer.forget_numberings(box) + def _can_optimize_call_pure(self, op): + arg_consts = [] + for i in range(op.numargs()): + arg = op.getarg(i) + const = self.optimizer.get_constant_box(arg) + if const is None: + return None + arg_consts.append(const) + else: + # all constant arguments: check if we already know the result + try: + return self.optimizer.call_pure_results[arg_consts] + except KeyError: + return None + class Optimizer(Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -57,6 +57,16 @@ self.emit_operation(nextop) def optimize_CALL_PURE(self, op): + # Step 1: check if all arguments are constant + result = self._can_optimize_call_pure(op) + if result is not None: + # this removes a CALL_PURE with all constant arguments. + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return + + # Step 2: check if all arguments are the same as a previous + # CALL_PURE. 
args = self.optimizer.make_args_key(op) oldop = self.pure_operations.get(args, None) if oldop is not None and oldop.getdescr() is op.getdescr(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -516,24 +516,13 @@ return False def optimize_CALL_PURE(self, op): - arg_consts = [] - for i in range(op.numargs()): - arg = op.getarg(i) - const = self.get_constant_box(arg) - if const is None: - break - arg_consts.append(const) - else: - # all constant arguments: check if we already know the result - try: - result = self.optimizer.call_pure_results[arg_consts] - except KeyError: - pass - else: - # this removes a CALL_PURE with all constant arguments. - self.make_constant(op.result, result) - self.last_emitted_operation = REMOVED - return + # this removes a CALL_PURE with all constant arguments. + # Note that it's also done in pure.py. For now we need both... 
+ result = self._can_optimize_call_pure(op) + if result is not None: + self.make_constant(op.result, result) + self.last_emitted_operation = REMOVED + return self.emit_operation(op) def optimize_GUARD_NO_EXCEPTION(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5164,7 +5164,6 @@ self.optimize_strunicode_loop(ops, expected) def test_call_pure_vstring_const(self): - py.test.skip("implement me") ops = """ [] p0 = newstr(3) diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -30,7 +30,7 @@ 'stdio.h', 'netdb.h', 'arpa/inet.h', - 'stdint.h', + 'stdint.h', 'errno.h', ) if _HAS_AF_PACKET: @@ -139,7 +139,7 @@ EAI_SOCKTYPE EAI_SYSTEM IPPROTO_AH IPPROTO_BIP IPPROTO_DSTOPTS IPPROTO_EGP IPPROTO_EON IPPROTO_ESP -IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS +IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS IPPROTO_ICMPV6 IPPROTO_IDP IPPROTO_IGMP IPPROTO_IPCOMP IPPROTO_IPIP IPPROTO_IPV4 IPPROTO_IPV6 IPPROTO_MAX IPPROTO_MOBILE IPPROTO_ND IPPROTO_NONE IPPROTO_PIM IPPROTO_PUP IPPROTO_ROUTING IPPROTO_RSVP IPPROTO_TCP IPPROTO_TP @@ -174,7 +174,7 @@ SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM -SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE +SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE SO_ACCEPTCONN SO_BROADCAST SO_DEBUG SO_DONTROUTE SO_ERROR SO_EXCLUSIVEADDRUSE SO_KEEPALIVE SO_LINGER SO_OOBINLINE SO_RCVBUF SO_RCVLOWAT SO_RCVTIMEO @@ -286,7 +286,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), diff --git a/rpython/rlib/rsocket.py 
b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,6 +18,7 @@ from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rthread import dummy_lock from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET @@ -1124,22 +1125,24 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name): - # XXX use gethostbyname_r() if available, and/or use locks if not +def gethostbyname_ex(name, lock=dummy_lock): + # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - hostent = _c.gethostbyname(name) - return gethost_common(name, hostent, addr) + with lock: + hostent = _c.gethostbyname(name) + return gethost_common(name, hostent, addr) -def gethostbyaddr(ip): - # XXX use gethostbyaddr_r() if available, and/or use locks if not +def gethostbyaddr(ip, lock=dummy_lock): + # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - p, size = addr.lock_in_addr() - try: - hostent = _c.gethostbyaddr(p, size, addr.family) - finally: - addr.unlock() - return gethost_common(ip, hostent, addr) + with lock: + p, size = addr.lock_in_addr() + try: + hostent = _c.gethostbyaddr(p, size, addr.family) + finally: + addr.unlock() + return gethost_common(ip, hostent, addr) def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,4 +1,3 @@ - from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir @@ -113,6 +112,24 @@ assert len(y) == 0 
return rffi.cast(lltype.Signed, ll_start_new_thread(x)) +class DummyLock(object): + def acquire(self, flag): + return True + + def release(self): + pass + + def _freeze_(self): + return True + + def __enter__(self): + pass + + def __exit__(self, *args): + pass + +dummy_lock = DummyLock() + class Lock(object): """ Container for low-level implementation of a lock object diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,6 +3,7 @@ from rpython.rlib.rsocket import * import socket as cpy_socket + def setup_module(mod): rsocket_startup() @@ -61,6 +62,43 @@ py.test.fail("could not find the localhost address in %r" % (address_list,)) +def test_thread_safe_gethostbyname_ex(): + import threading + nthreads = 10 + domain = 'google.com' + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def lookup_name(i): + name, aliases, address_list = gethostbyname_ex(domain, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_name, args=[i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + +def test_thread_safe_gethostbyaddr(): + import threading + nthreads = 10 + ip = '8.8.8.8' + domain = gethostbyaddr(ip)[0] + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def lookup_addr(ip, i): + name, aliases, address_list = gethostbyaddr(ip, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + def test_gethostbyaddr(): try: cpy_socket.gethostbyaddr("::1") diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ 
b/rpython/translator/c/gcc/trackgcroot.py @@ -693,6 +693,9 @@ return self.visit_ret(line) return [] + def visit_ud2(self, line): + return InsnStop("ud2") # unreachable instruction + def visit_jmp(self, line): tablelabels = [] match = self.r_jmp_switch.match(line) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -421,24 +421,12 @@ #XXX: this conditional part is not tested at all if self.config.translation.gcrootfinder == 'asmgcc': - trackgcfiles = [cfile[:cfile.rfind('.')] for cfile in mk.cfiles] if self.translator.platform.name == 'msvc': - trackgcfiles = [f for f in trackgcfiles - if f.startswith(('implement', 'testing', - '../module_cache/module'))] - sfiles = ['%s.s' % (c,) for c in trackgcfiles] - lblsfiles = ['%s.lbl.s' % (c,) for c in trackgcfiles] - gcmapfiles = ['%s.gcmap' % (c,) for c in trackgcfiles] - mk.definition('ASMFILES', sfiles) - mk.definition('ASMLBLFILES', lblsfiles) - mk.definition('GCMAPFILES', gcmapfiles) - if self.translator.platform.name == 'msvc': - mk.definition('DEBUGFLAGS', '-MD -Zi') + raise Exception("msvc no longer supports asmgcc") + if self.config.translation.shared: + mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') else: - if self.config.translation.shared: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') - else: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g') + mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g') if self.config.translation.shared: mk.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") @@ -447,46 +435,28 @@ mk.definition('PYTHON', get_recent_cpython_executable()) - if self.translator.platform.name == 'msvc': - lblofiles = [] - for cfile in mk.cfiles: - f = cfile[:cfile.rfind('.')] - if f in trackgcfiles: - ofile = '%s.lbl.obj' % (f,) - else: - ofile = '%s.obj' % (f,) + mk.definition('GCMAPFILES', '$(subst .c,.gcmap,$(SOURCES))') + mk.definition('OBJECTS1', 
'$(subst .c,.o,$(SOURCES))') + mk.definition('OBJECTS', '$(OBJECTS1) gcmaptable.s') - lblofiles.append(ofile) - mk.definition('ASMLBLOBJFILES', lblofiles) - mk.definition('OBJECTS', 'gcmaptable.obj $(ASMLBLOBJFILES)') - # /Oi (enable intrinsics) and /Ob1 (some inlining) are mandatory - # even in debug builds - mk.definition('ASM_CFLAGS', '$(CFLAGS) $(CFLAGSEXTRA) /Oi /Ob1') - mk.rule('.SUFFIXES', '.s', []) - mk.rule('.s.obj', '', - 'cmd /c $(MASM) /nologo /Cx /Cp /Zm /coff /Fo$@ /c $< $(INCLUDEDIRS)') - mk.rule('.c.gcmap', '', - ['$(CC) /nologo $(ASM_CFLAGS) /c /FAs /Fa$*.s $< $(INCLUDEDIRS)', - 'cmd /c $(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc -t $*.s > $@'] - ) - mk.rule('gcmaptable.c', '$(GCMAPFILES)', - 'cmd /c $(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py -fmsvc $(GCMAPFILES) > $@') + # the rule that transforms %.c into %.o, by compiling it to + # %.s, then applying trackgcroot to get %.lbl.s and %.gcmap, and + # finally by using the assembler ($(CC) again for now) to get %.o + mk.rule('%.o %.gcmap', '%.c', [ + '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< ' + '-o $*.s -S $< $(INCLUDEDIRS)', + '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' + '-t $*.s > $*.gctmp', + '$(CC) -o $*.o -c $*.lbl.s', + 'mv $*.gctmp $*.gcmap', + 'rm $*.s $*.lbl.s']) - else: - mk.definition('OBJECTS', '$(ASMLBLFILES) gcmaptable.s') - mk.rule('%.s', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< -o $@ -S $< $(INCLUDEDIRS)') - mk.rule('%.s', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< -o $@ -S $< $(INCLUDEDIRS)') - mk.rule('%.lbl.s %.gcmap', '%.s', - [ - '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' - '-t $< > $*.gctmp', - 'mv $*.gctmp $*.gcmap']) - mk.rule('gcmaptable.s', '$(GCMAPFILES)', - [ - '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' - '$(GCMAPFILES) > $@.tmp', - 'mv $@.tmp $@']) - mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed") + # the rule to compute gcmaptable.s + 
mk.rule('gcmaptable.s', '$(GCMAPFILES)', + [ + '$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py ' + '$(GCMAPFILES) > $@.tmp', + 'mv $@.tmp $@']) else: if self.translator.platform.name == 'msvc': diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1123,6 +1123,8 @@ # fd1 = os.open(filename1, os.O_WRONLY | os.O_CREAT, 0666) fd2 = os.open(filename2, os.O_WRONLY | os.O_CREAT, 0666) + # try to ensure we get twice the exact same output below + gc.collect(); gc.collect(); gc.collect() rgc.dump_rpy_heap(fd1) rgc.dump_rpy_heap(fd2) # try twice in a row keepalive_until_here(s2) From noreply at buildbot.pypy.org Fri May 2 23:37:42 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 2 May 2014 23:37:42 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix potential overflow problems Message-ID: <20140502213742.246401C003C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71208:563e581721d0 Date: 2014-05-02 14:35 -0700 http://bitbucket.org/pypy/pypy/changeset/563e581721d0/ Log: fix potential overflow problems diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, "GetReadEntry")) + entry = 
space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) From noreply at buildbot.pypy.org Fri May 2 23:37:43 2014 From: noreply at buildbot.pypy.org (wlav) Date: Fri, 2 May 2014 23:37:43 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: consistency in use of integers Message-ID: <20140502213743.756F31C003C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71209:8282572159fe Date: 2014-05-02 14:37 -0700 http://bitbucket.org/pypy/pypy/changeset/8282572159fe/ Log: consistency in use of integers diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ 
-259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, 
cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis From noreply at buildbot.pypy.org Sat May 3 
00:57:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 00:57:36 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: additional fixes Message-ID: <20140502225736.E041E1C003C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71210:1a89979e6d52 Date: 2014-05-02 18:52 -0400 http://bitbucket.org/pypy/pypy/changeset/1a89979e6d52/ Log: additional fixes diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -211,17 +211,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - module = space.str_w(space.type(self).get_module()) + typename = space.type(self).name try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap("<%s.%s>" % (module, typename,)) + return space.wrap("<%s>" % (typename,)) else: name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -701,7 +701,9 @@ class A(object): pass assert repr(A) == "" - assert repr(type(type)) == "" + A.__module__ = 123 + assert repr(A) == "" + assert repr(type(type)) == "" assert repr(complex) == "" assert repr(property) == "" assert repr(TypeError) == "" diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1098,7 +1098,7 @@ def repr__Type(space, w_obj): w_mod = w_obj.get_module() - if not space.isinstance_w(w_mod, 
space.w_str): + if w_mod is None or not space.isinstance_w(w_mod, space.w_str): mod = None else: mod = space.str_w(w_mod) From noreply at buildbot.pypy.org Sat May 3 03:21:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:21:28 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support Message-ID: <20140503012128.6F2FE1C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71211:c296ea98cf63 Date: 2014-05-02 21:14 -0400 http://bitbucket.org/pypy/pypy/changeset/c296ea98cf63/ Log: merge reflex-support diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, "GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = 
w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, 
space.int_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef 
long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,8 +7,6 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - if sys.maxsize < 2 ** 31: - py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Sat May 3 03:21:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:21:29 +0200 (CEST) Subject: [pypy-commit] pypy default: whitespace Message-ID: <20140503012129.9FC381C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71212:773fc6275c69 Date: 2014-05-02 21:19 -0400 http://bitbucket.org/pypy/pypy/changeset/773fc6275c69/ Log: whitespace diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -154,7 +154,7 @@ Improve optimization of small allocation-heavy loops in the JIT .. branch: reflex-support - + .. 
branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 .. branch: lexer_token_position_class @@ -164,4 +164,3 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr - From noreply at buildbot.pypy.org Sat May 3 03:21:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:21:30 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140503012130.CBE451C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71213:6066ed126e77 Date: 2014-05-02 21:20 -0400 http://bitbucket.org/pypy/pypy/changeset/6066ed126e77/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0f75ad4d14ce +.. startrev: 773fc6275c69 From noreply at buildbot.pypy.org Sat May 3 03:21:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:21:32 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default Message-ID: <20140503012132.2A1251C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71214:16b7931bb487 Date: 2014-05-02 21:20 -0400 http://bitbucket.org/pypy/pypy/changeset/16b7931bb487/ Log: merge default diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -154,7 +154,7 @@ Improve optimization of small allocation-heavy loops in the JIT .. branch: reflex-support - + .. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 .. branch: lexer_token_position_class @@ -164,4 +164,3 @@ .. 
branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr - diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0f75ad4d14ce +.. startrev: 773fc6275c69 diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1074,7 +1074,9 @@ return x+y ''') """ + prefix = "" if not isinstance(source, str): + flags = source.__code__.co_flags source = py.std.inspect.getsource(source).lstrip() while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function @@ -1083,12 +1085,21 @@ source = source[source.find('\n') + 1:].lstrip() assert source.startswith("def "), "can only transform functions" source = source[4:] + import __future__ + if flags & __future__.CO_FUTURE_DIVISION: + prefix += "from __future__ import division\n" + if flags & __future__.CO_FUTURE_ABSOLUTE_IMPORT: + prefix += "from __future__ import absolute_import\n" + if flags & __future__.CO_FUTURE_PRINT_FUNCTION: + prefix += "from __future__ import print_function\n" + if flags & __future__.CO_FUTURE_UNICODE_LITERALS: + prefix += "from __future__ import unicode_literals\n" p = source.find('(') assert p >= 0 funcname = source[:p].strip() source = source[p:] assert source.strip() - funcsource = "def %s%s\n" % (funcname, source) + funcsource = prefix + "def %s%s\n" % (funcname, source) #for debugging of wrong source code: py.std.parser.suite(funcsource) a = applevel(funcsource, filename=filename) return a.interphook(funcname) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -1,18 +1,20 @@ - # -*- coding: utf-8 -*- +from __future__ import division, 
print_function # for test_app2interp_future from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature import py import sys + class FakeFunc(object): def __init__(self, space, name): self.space = space self.name = name self.defs_w = [] + class TestBuiltinCode: def test_signature(self): def c(space, w_x, w_y, hello_w): @@ -89,8 +91,8 @@ w_result = code.funcrun(FakeFunc(self.space, "c"), args) assert self.space.eq_w(w_result, w(1020)) + class TestGateway: - def test_app2interp(self): w = self.space.wrap def app_g3(a, b): @@ -117,6 +119,14 @@ args = gateway.Arguments(self.space, [w(6)], ['hello', 'world'], [w(7), w(8)]) assert self.space.int_w(gg(self.space, w(3), args)) == 213 + def test_app2interp_future(self): + w = self.space.wrap + def app_g3(a, b): + print(end='') + return a / b + g3 = gateway.app2interp_temp(app_g3) + assert self.space.eq_w(g3(self.space, w(1), w(4),), w(0.25)) + def test_interp2app(self): space = self.space w = space.wrap @@ -616,7 +626,7 @@ w_app_f = self.space.wrap(app_f) assert isinstance(w_app_f.code, gateway.BuiltinCode2) - + called = [] fastcall_2 = w_app_f.code.fastcall_2 def witness_fastcall_2(space, w_func, w_a, w_b): @@ -736,7 +746,6 @@ class TestPassThroughArguments: - def test_pass_trough_arguments0(self): space = self.space @@ -834,7 +843,6 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): def __init__(self, **kw): @@ -873,4 +881,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected 
@@ -144,8 +144,8 @@ entries = {} for entry in stats: entries[entry.code] = entry - efoo = entries[foo.func_code] - ebar = entries[bar.func_code] + efoo = entries[foo.__code__] + ebar = entries[bar.__code__] assert 0.9 < efoo.totaltime < 2.9 # --- cannot test .inlinetime, because it does not include # --- the time spent doing the call to time.time() @@ -219,12 +219,12 @@ lines.remove(line) break else: - print 'NOT FOUND:', pattern.rstrip('\n') - print '--- GOT ---' - print got - print - print '--- EXPECTED ---' - print expected + print('NOT FOUND: %s' % pattern.rstrip('\n')) + print('--- GOT ---') + print(got) + print('') + print('--- EXPECTED ---') + print(expected) assert False assert not lines finally: diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, 
"GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 
'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ 
extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,8 +7,6 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - if sys.maxsize < 2 ** 31: - py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Sat May 3 03:25:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:25:58 +0200 (CEST) Subject: [pypy-commit] pypy fix-tpname: close branch for merging Message-ID: <20140503012558.BAE781C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: fix-tpname Changeset: r71215:d307272e3d81 Date: 2014-05-02 21:22 -0400 http://bitbucket.org/pypy/pypy/changeset/d307272e3d81/ Log: close branch for merging From noreply at buildbot.pypy.org Sat May 3 03:26:01 2014 From: noreply at 
buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:26:01 +0200 (CEST) Subject: [pypy-commit] pypy default: merge fix-tpname Message-ID: <20140503012601.4F61F1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71216:4d228428822c Date: 2014-05-02 21:22 -0400 http://bitbucket.org/pypy/pypy/changeset/4d228428822c/ Log: merge fix-tpname diff too long, truncating to 2000 out of 2173 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, @@ -2804,6 +2803,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2851,6 +2851,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2898,6 +2899,7 @@ __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -2951,6 +2953,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2998,6 +3001,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, 
w_self): if w_self.w_dict is not None: @@ -3063,6 +3067,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3191,6 +3196,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), __init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3315,6 +3321,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3368,6 +3375,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3415,6 +3423,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3492,6 +3501,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3605,6 +3615,7 @@ __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) +AugAssign.typedef.heaptype = True def Print_get_dest(space, w_self): if w_self.w_dict is not None: @@ -3711,6 +3722,7 @@ __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) +Print.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3842,6 +3854,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -3943,6 +3956,7 @@ __new__=interp2app(get_AST_new(While)), 
__init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4044,6 +4058,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ -4151,6 +4166,7 @@ __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_type(space, w_self): if w_self.w_dict is not None: @@ -4264,6 +4280,7 @@ __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4359,6 +4376,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4430,6 +4448,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4513,6 +4532,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4560,6 +4580,7 @@ __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) +Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4668,6 +4689,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Exec_get_body(space, w_self): if w_self.w_dict is not None: @@ -4781,6 +4803,7 @@ __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) +Exec.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4828,6 +4851,7 @@ 
__new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4881,6 +4905,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ -4898,6 +4923,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Break, w_self) @@ -4915,6 +4941,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4932,6 +4959,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4997,6 +5025,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5074,6 +5103,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True def BinOp_get_left(space, w_self): if w_self.w_dict is not None: @@ -5187,6 +5217,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5270,6 +5301,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5351,6 +5383,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) 
+Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5464,6 +5497,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5535,6 +5569,7 @@ __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) +Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5582,6 +5617,7 @@ __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5659,6 +5695,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5736,6 +5773,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5843,6 +5881,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5920,6 +5959,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = True def Yield_get_value(space, w_self): if w_self.w_dict is not None: @@ -5973,6 +6013,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6074,6 +6115,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6235,6 +6277,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), 
) +Call.typedef.heaptype = True def Repr_get_value(space, w_self): if w_self.w_dict is not None: @@ -6288,6 +6331,7 @@ __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) +Repr.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6340,6 +6384,7 @@ __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) +Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6392,6 +6437,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6504,6 +6550,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6617,6 +6664,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6699,6 +6747,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6776,6 +6825,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6853,6 +6903,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -6905,6 +6956,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -6912,6 +6964,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True 
_Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -6919,6 +6972,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -6926,6 +6980,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True _Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -6933,6 +6988,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -6940,6 +6996,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -6947,6 +7004,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -6954,6 +7012,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -6961,6 +7020,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6978,6 +7038,7 @@ __new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7091,6 +7152,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7138,6 +7200,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, 
w_self): if w_self.w_dict is not None: @@ -7191,6 +7254,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7198,6 +7262,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True _And.typedef = typedef.TypeDef("And", boolop.typedef, @@ -7205,6 +7270,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7212,6 +7278,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7219,6 +7286,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7226,6 +7294,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7233,6 +7302,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7240,6 +7310,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True _Div.typedef = typedef.TypeDef("Div", operator.typedef, @@ -7247,6 +7318,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7254,6 +7326,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7261,6 +7334,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = 
typedef.TypeDef("LShift", operator.typedef, @@ -7268,6 +7342,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7275,6 +7350,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = typedef.TypeDef("BitOr", operator.typedef, @@ -7282,6 +7358,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7289,6 +7366,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7296,6 +7374,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7303,6 +7382,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7310,6 +7390,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7317,6 +7398,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Invert)), ) +_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7324,6 +7406,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7331,6 +7414,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7338,6 +7422,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), 
) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7345,6 +7430,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7352,6 +7438,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype = True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7359,6 +7446,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7366,6 +7454,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7373,6 +7462,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7380,6 +7470,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7387,6 +7478,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7394,6 +7486,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = True _IsNot.typedef = typedef.TypeDef("IsNot", cmpop.typedef, @@ -7401,6 +7494,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7408,6 +7502,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7415,6 +7510,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, 
w_self): if w_self.w_dict is not None: @@ -7522,6 +7618,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7587,6 +7684,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7694,6 +7792,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -7829,6 +7928,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -7911,6 +8011,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -7995,4 +8096,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -352,6 +352,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): @@ -669,10 +670,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", 
_fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -363,7 +363,7 @@ if fmt == 'R': result = space.str_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name elif fmt == 'N': result = value.getname(space) else: @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,9 +20,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import _HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast @@ -223,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no 
attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ -407,7 +407,7 @@ def test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -56,8 +56,7 @@ raise MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), next = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ 
b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ 
b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(xrange(-5125, -5000)) d.__init__(xrange(200)) for i in xrange(200, 400): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -245,8 +245,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git 
a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -211,17 +211,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - module = space.str_w(space.type(self).get_module()) + typename = space.type(self).name try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap("<%s.%s>" % (module, typename,)) + return space.wrap("<%s>" % (typename,)) else: name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ @@ -844,10 +843,9 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -892,10 +890,9 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = interp2app(W_BufferedWriter.descr_init), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1015,10 +1012,9 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = 
interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', W_TextIOBase.typedef, __new__ = generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1015,11 +1015,10 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), - __module__ = "_io", next = interp2app(W_TextIOWrapper.next_w), read = interp2app(W_TextIOWrapper.read_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' 
if class_name is None: @@ -440,8 +440,7 @@ return space.wrap(p) W_Profiler.typedef = TypeDef( - 'Profiler', - __module__ = '_lsprof', + '_lsprof.Profiler', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), disable = interp2app(W_Profiler.disable), diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -353,9 +353,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = 
'_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -534,8 +533,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,8 +600,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -488,9 +488,8 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -6,6 +6,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, interp_attrproperty, interp_attrproperty_w) +from pypy.objspace.std.typeobject import W_TypeObject from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, @@ -158,7 +159,9 @@ self.doc = doc self.func = func pyo = rffi.cast(PyObject, pto) - self.w_objclass = from_ref(space, pyo) + w_type = 
from_ref(space, pyo) + assert isinstance(w_type, W_TypeObject) + self.w_objclass = w_type def call(self, space, w_self, w_args, w_kw): if self.wrapper_func is None: @@ -174,7 +177,7 @@ def descr_method_repr(self): return self.space.wrap("" % (self.method_name, - self.w_objclass.getname(self.space))) + self.w_objclass.name)) def cwrapper_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCWrapperObject, w_self) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -33,7 +33,7 @@ assert "copy" in repr(module.fooType.copy) assert repr(module.fooType) == "" assert repr(obj2) == "" - assert repr(module.fooType.__call__) == "" + assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) print(obj.foo) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -291,14 +291,9 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, self) convert_member_defs(space, dict_w, pto.c_tp_members, self) - full_name = rffi.charp2str(pto.c_tp_name) - if '.' 
in full_name: - module_name, extension_name = rsplit(full_name, ".", 1) - dict_w["__module__"] = space.wrap(module_name) - else: - extension_name = full_name + name = rffi.charp2str(pto.c_tp_name) - W_TypeObject.__init__(self, space, extension_name, + W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w) if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True @@ -518,7 +513,7 @@ from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: - pto.c_tp_name = rffi.str2charp(w_type.getname(space)) + pto.c_tp_name = rffi.str2charp(w_type.name) pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -207,9 +207,8 @@ return interp2app(descr_new_base_exception) W_BaseException.typedef = TypeDef( - 'BaseException', + 'exceptions.BaseException', __doc__ = W_BaseException.__doc__, - __module__ = 'exceptions', __new__ = _new(W_BaseException), __init__ = interp2app(W_BaseException.descr_init), __str__ = interp2app(W_BaseException.descr_str), @@ -244,10 +243,9 @@ for k, v in kwargs.items(): kwargs[k] = interp2app(v.__get__(None, realbase)) W_Exc.typedef = TypeDef( - name, + 'exceptions.' 
+ name, base.typedef, __doc__ = W_Exc.__doc__, - __module__ = 'exceptions', **kwargs ) W_Exc.typedef.applevel_subclasses_base = realbase @@ -312,10 +310,9 @@ """) W_UnicodeTranslateError.typedef = TypeDef( - 'UnicodeTranslateError', + 'exceptions.UnicodeTranslateError', W_UnicodeError.typedef, __doc__ = W_UnicodeTranslateError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeTranslateError), __init__ = interp2app(W_UnicodeTranslateError.descr_init), __str__ = interp2app(W_UnicodeTranslateError.descr_str), @@ -396,10 +393,9 @@ return W_BaseException.descr_str(self, space) W_EnvironmentError.typedef = TypeDef( - 'EnvironmentError', + 'exceptions.EnvironmentError', W_StandardError.typedef, __doc__ = W_EnvironmentError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_EnvironmentError), __reduce__ = interp2app(W_EnvironmentError.descr_reduce), __init__ = interp2app(W_EnvironmentError.descr_init), @@ -453,10 +449,9 @@ _winerror_to_errno, _default_errno = {}, 22 # EINVAL W_WindowsError.typedef = TypeDef( - "WindowsError", + "exceptions.WindowsError", W_OSError.typedef, __doc__ = W_WindowsError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_WindowsError), __init__ = interp2app(W_WindowsError.descr_init), __str__ = interp2app(W_WindowsError.descr_str), @@ -557,14 +552,13 @@ return W_StandardError.descr_repr(self, space) W_SyntaxError.typedef = TypeDef( - 'SyntaxError', + 'exceptions.SyntaxError', W_StandardError.typedef, __new__ = _new(W_SyntaxError), __init__ = interp2app(W_SyntaxError.descr_init), __str__ = interp2app(W_SyntaxError.descr_str), __repr__ = interp2app(W_SyntaxError.descr_repr), __doc__ = W_SyntaxError.__doc__, - __module__ = 'exceptions', msg = readwrite_attrproperty_w('w_msg', W_SyntaxError), filename = readwrite_attrproperty_w('w_filename', W_SyntaxError), lineno = readwrite_attrproperty_w('w_lineno', W_SyntaxError), @@ -593,12 +587,11 @@ W_BaseException.descr_init(self, space, args_w) W_SystemExit.typedef = TypeDef( - 
'SystemExit', + 'exceptions.SystemExit', W_BaseException.typedef, __new__ = _new(W_SystemExit), __init__ = interp2app(W_SystemExit.descr_init), __doc__ = W_SystemExit.__doc__, - __module__ = 'exceptions', code = readwrite_attrproperty_w('w_code', W_SystemExit) ) @@ -658,10 +651,9 @@ """) W_UnicodeDecodeError.typedef = TypeDef( - 'UnicodeDecodeError', + 'exceptions.UnicodeDecodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeDecodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeDecodeError), __init__ = interp2app(W_UnicodeDecodeError.descr_init), __str__ = interp2app(W_UnicodeDecodeError.descr_str), @@ -753,10 +745,9 @@ """) W_UnicodeEncodeError.typedef = TypeDef( - 'UnicodeEncodeError', + 'exceptions.UnicodeEncodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeEncodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeEncodeError), __init__ = interp2app(W_UnicodeEncodeError.descr_init), __str__ = interp2app(W_UnicodeEncodeError.descr_str), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -57,8 +57,7 @@ return space.wrap(r) W_Count.typedef = TypeDef( - 'count', - __module__ = 'itertools', + 'itertools.count', __new__ = interp2app(W_Count___new__), __iter__ = interp2app(W_Count.iter_w), next = interp2app(W_Count.next_w), @@ -120,8 +119,7 @@ return space.wrap(r) W_Repeat.typedef = TypeDef( - 'repeat', - __module__ = 'itertools', + 'itertools.repeat', __new__ = interp2app(W_Repeat___new__), __iter__ = interp2app(W_Repeat.iter_w), __length_hint__ = interp2app(W_Repeat.length_w), @@ -174,8 +172,7 @@ W_TakeWhile.typedef = TypeDef( - 'takewhile', - __module__ = 'itertools', + 'itertools.takewhile', __new__ = interp2app(W_TakeWhile___new__), __iter__ = interp2app(W_TakeWhile.iter_w), next = interp2app(W_TakeWhile.next_w), @@ -223,8 +220,7 @@ W_DropWhile.typedef = TypeDef( - 'dropwhile', 
- __module__ = 'itertools', + 'itertools.dropwhile', __new__ = interp2app(W_DropWhile___new__), __iter__ = interp2app(W_DropWhile.iter_w), next = interp2app(W_DropWhile.next_w), @@ -280,8 +276,7 @@ return space.wrap(r) W_IFilter.typedef = TypeDef( - 'ifilter', - __module__ = 'itertools', + 'itertools.ifilter', __new__ = interp2app(W_IFilter___new__), __iter__ = interp2app(W_IFilter.iter_w), next = interp2app(W_IFilter.next_w), @@ -308,8 +303,7 @@ return space.wrap(r) W_IFilterFalse.typedef = TypeDef( - 'ifilterfalse', - __module__ = 'itertools', + 'itertools.ifilterfalse', __new__ = interp2app(W_IFilterFalse___new__), __iter__ = interp2app(W_IFilterFalse.iter_w), next = interp2app(W_IFilterFalse.next_w), @@ -417,8 +411,7 @@ return space.wrap(r) W_ISlice.typedef = TypeDef( - 'islice', - __module__ = 'itertools', + 'itertools.islice', __new__ = interp2app(W_ISlice___new__), __iter__ = interp2app(W_ISlice.iter_w), next = interp2app(W_ISlice.next_w), @@ -482,8 +475,7 @@ return space.wrap(r) W_Chain.typedef = TypeDef( - 'chain', - __module__ = 'itertools', + 'itertools.chain', __new__ = interp2app(W_Chain___new__), __iter__ = interp2app(W_Chain.iter_w), next = interp2app(W_Chain.next_w), @@ -564,8 +556,7 @@ return space.wrap(r) W_IMap.typedef = TypeDef( - 'imap', - __module__ = 'itertools', + 'itertools.imap', __new__ = interp2app(W_IMap___new__), __iter__ = interp2app(W_IMap.iter_w), next = interp2app(W_IMap.next_w), @@ -609,8 +600,7 @@ return space.wrap(r) W_IZip.typedef = TypeDef( - 'izip', - __module__ = 'itertools', + 'itertools.izip', __new__ = interp2app(W_IZip___new__), __iter__ = interp2app(W_IZip.iter_w), next = interp2app(W_IZip.next_w), @@ -678,8 +668,7 @@ return space.wrap(self) W_IZipLongest.typedef = TypeDef( - 'izip_longest', - __module__ = 'itertools', + 'itertools.izip_longest', __new__ = interp2app(W_IZipLongest___new__), __iter__ = interp2app(W_IZipLongest.iter_w), next = interp2app(W_IZipLongest.next_w), @@ -737,8 +726,7 @@ return space.wrap(r) 
W_Cycle.typedef = TypeDef( - 'cycle', - __module__ = 'itertools', + 'itertools.cycle', __new__ = interp2app(W_Cycle___new__), __iter__ = interp2app(W_Cycle.iter_w), next = interp2app(W_Cycle.next_w), @@ -778,8 +766,7 @@ return space.wrap(r) W_StarMap.typedef = TypeDef( - 'starmap', - __module__ = 'itertools', + 'itertools.starmap', __new__ = interp2app(W_StarMap___new__), __iter__ = interp2app(W_StarMap.iter_w), next = interp2app(W_StarMap.next_w), @@ -879,8 +866,7 @@ myiter.chained_list)) W_TeeIterable.typedef = TypeDef( - '_tee', - __module__ = 'itertools', + 'itertools._tee', __new__ = interp2app(W_TeeIterable___new__), __iter__ = interp2app(W_TeeIterable.iter_w), next = interp2app(W_TeeIterable.next_w), @@ -983,8 +969,7 @@ return space.wrap(r) W_GroupBy.typedef = TypeDef( - 'groupby', - __module__ = 'itertools', + 'itertools.groupby', __new__ = interp2app(W_GroupBy___new__), __iter__ = interp2app(W_GroupBy.iter_w), next = interp2app(W_GroupBy.next_w), @@ -1031,8 +1016,7 @@ return w_obj W_GroupByIterator.typedef = TypeDef( - '_groupby', - __module__ = 'itertools', + 'itertools._groupby', __iter__ = interp2app(W_GroupByIterator.iter_w), next = interp2app(W_GroupByIterator.next_w)) W_GroupByIterator.typedef.acceptable_as_base_class = False @@ -1063,8 +1047,7 @@ return space.wrap(r) W_Compress.typedef = TypeDef( - 'compress', - __module__ = 'itertools', + 'itertools.compress', __new__ = interp2app(W_Compress__new__), __iter__ = interp2app(W_Compress.iter_w), next = interp2app(W_Compress.next_w), @@ -1159,8 +1142,7 @@ return space.wrap(r) W_Product.typedef = TypeDef( - 'product', - __module__ = 'itertools', + 'itertools.product', __new__ = interp2app(W_Product__new__), __iter__ = interp2app(W_Product.iter_w), next = interp2app(W_Product.next_w), @@ -1263,8 +1245,7 @@ res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_Combinations.typedef = TypeDef("combinations", - __module__ = 'itertools', +W_Combinations.typedef = TypeDef("itertools.combinations", 
__new__ = interp2app(W_Combinations__new__), __iter__ = interp2app(W_Combinations.descr__iter__), next = interp2app(W_Combinations.descr_next), @@ -1298,8 +1279,8 @@ res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_CombinationsWithReplacement.typedef = TypeDef("combinations_with_replacement", - __module__ = 'itertools', +W_CombinationsWithReplacement.typedef = TypeDef( + "itertools.combinations_with_replacement", __new__ = interp2app(W_CombinationsWithReplacement__new__), __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), next = interp2app(W_CombinationsWithReplacement.descr_next), @@ -1364,8 +1345,7 @@ res.__init__(space, pool_w, r) return space.wrap(res) -W_Permutations.typedef = TypeDef("permutations", - __module__ = 'itertools', +W_Permutations.typedef = TypeDef("itertools.permutations", __new__ = interp2app(W_Permutations__new__), __iter__ = interp2app(W_Permutations.descr__iter__), next = interp2app(W_Permutations.descr_next), diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -546,9 +546,7 @@ return W_UnicodeBox(arr, 0, arr.dtype) -W_GenericBox.typedef = TypeDef("generic", - __module__ = "numpy", - +W_GenericBox.typedef = TypeDef("numpy.generic", __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), @@ -639,181 +637,151 @@ flags = GetSetProperty(W_GenericBox.descr_get_flags), ) -W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpy", +W_BoolBox.typedef = TypeDef("numpy.bool_", W_GenericBox.typedef, __new__ = interp2app(W_BoolBox.descr__new__.im_func), __index__ = interp2app(W_BoolBox.descr_index), __reduce__ = interp2app(W_BoolBox.descr_reduce), ) -W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpy", +W_NumberBox.typedef = TypeDef("numpy.number", W_GenericBox.typedef, ) -W_IntegerBox.typedef = 
TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpy", +W_IntegerBox.typedef = TypeDef("numpy.integer", W_NumberBox.typedef, ) -W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_SignedIntegerBox.typedef = TypeDef("numpy.signedinteger", W_IntegerBox.typedef, ) -W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_UnsignedIntegerBox.typedef = TypeDef("numpy.unsignedinteger", W_IntegerBox.typedef, ) -W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int8Box.typedef = TypeDef("numpy.int8", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int8Box.descr__new__.im_func), __index__ = interp2app(W_Int8Box.descr_index), __reduce__ = interp2app(W_Int8Box.descr_reduce), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt8Box.typedef = TypeDef("numpy.uint8", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt8Box.descr__new__.im_func), __index__ = interp2app(W_UInt8Box.descr_index), __reduce__ = interp2app(W_UInt8Box.descr_reduce), ) -W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int16Box.typedef = TypeDef("numpy.int16", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int16Box.descr__new__.im_func), __index__ = interp2app(W_Int16Box.descr_index), __reduce__ = interp2app(W_Int16Box.descr_reduce), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt16Box.typedef = TypeDef("numpy.uint16", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt16Box.descr__new__.im_func), __index__ = interp2app(W_UInt16Box.descr_index), __reduce__ = interp2app(W_UInt16Box.descr_reduce), ) -W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, - __module__ = "numpy", +W_Int32Box.typedef = TypeDef("numpy.int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __new__ 
= interp2app(W_Int32Box.descr__new__.im_func), __index__ = interp2app(W_Int32Box.descr_index), __reduce__ = interp2app(W_Int32Box.descr_reduce), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt32Box.typedef = TypeDef("numpy.uint32", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt32Box.descr__new__.im_func), __index__ = interp2app(W_UInt32Box.descr_index), __reduce__ = interp2app(W_UInt32Box.descr_reduce), ) -W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpy", +W_Int64Box.typedef = TypeDef("numpy.int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __new__ = interp2app(W_Int64Box.descr__new__.im_func), __index__ = interp2app(W_Int64Box.descr_index), __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt64Box.typedef = TypeDef("numpy.uint64", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt64Box.descr__new__.im_func), __index__ = interp2app(W_UInt64Box.descr_index), __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) -W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, +W_LongBox.typedef = TypeDef("numpy.int%d" % LONG_BIT, (W_SignedIntegerBox.typedef, W_IntObject.typedef), - __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), __reduce__ = interp2app(W_LongBox.descr_reduce), ) -W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_ULongBox.typedef = TypeDef("numpy.uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_ULongBox.descr__new__.im_func), __index__ = interp2app(W_ULongBox.descr_index), __reduce__ = interp2app(W_ULongBox.descr_reduce), ) -W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpy", +W_InexactBox.typedef = TypeDef("numpy.inexact", W_NumberBox.typedef, ) 
-W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpy", +W_FloatingBox.typedef = TypeDef("numpy.floating", W_InexactBox.typedef, ) -W_Float16Box.typedef = TypeDef("float16", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float16Box.typedef = TypeDef("numpy.float16", W_FloatingBox.typedef, __new__ = interp2app(W_Float16Box.descr__new__.im_func), __reduce__ = interp2app(W_Float16Box.descr_reduce), ) -W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float32Box.typedef = TypeDef("numpy.float32", W_FloatingBox.typedef, __new__ = interp2app(W_Float32Box.descr__new__.im_func), __reduce__ = interp2app(W_Float32Box.descr_reduce), ) -W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - __module__ = "numpy", +W_Float64Box.typedef = TypeDef("numpy.float64", (W_FloatingBox.typedef, float_typedef), __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) -W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpy", +W_ComplexFloatingBox.typedef = TypeDef("numpy.complexfloating", W_InexactBox.typedef, ) -W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), - __module__ = "numpy", +W_Complex64Box.typedef = TypeDef("numpy.complex64", (W_ComplexFloatingBox.typedef), __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", +W_Complex128Box.typedef = TypeDef("numpy.complex128", (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), ) if long_double_size in (8, 
12, 16): - W_FloatLongBox.typedef = TypeDef("float%d" % (long_double_size * 8), (W_FloatingBox.typedef), - __module__ = "numpy", + W_FloatLongBox.typedef = TypeDef("numpy.float%d" % (long_double_size * 8), (W_FloatingBox.typedef), __new__ = interp2app(W_FloatLongBox.descr__new__.im_func), __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) - W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", + W_ComplexLongBox.typedef = TypeDef("numpy.complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, - __module__ = "numpy", +W_FlexibleBox.typedef = TypeDef("numpy.flexible", W_GenericBox.typedef, ) -W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, - __module__ = "numpy", +W_VoidBox.typedef = TypeDef("numpy.void", W_FlexibleBox.typedef, __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = interp2app(W_VoidBox.descr_setitem), ) -W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, - __module__ = "numpy", +W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, ) -W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), - __module__ = "numpy", +W_StringBox.typedef = TypeDef("numpy.string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), - __module__ = "numpy", +W_UnicodeBox.typedef = TypeDef("numpy.unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __new__ = 
interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -132,6 +132,8 @@ def get_name(self): name = self.w_box_type.name + if name.startswith('numpy.'): + name = name[6:] if name.endswith('_'): name = name[:-1] return name @@ -557,8 +559,7 @@ raise oefmt(space.w_TypeError, "data type not understood") -W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", +W_Dtype.typedef = TypeDef("numpy.dtype", __new__ = interp2app(descr__new__), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -62,8 +62,7 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) -W_FlagsObject.typedef = TypeDef("flagsobj", - __module__ = "numpy", +W_FlagsObject.typedef = TypeDef("numpy.flagsobj", __new__ = interp2app(W_FlagsObject.descr__new__.im_func), __getitem__ = interp2app(W_FlagsObject.descr_getitem), diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1318,8 +1318,7 @@ return result """, filename=__file__).interphook('searchsort') -W_NDimArray.typedef = TypeDef("ndarray", - __module__ = "numpy", +W_NDimArray.typedef = TypeDef("numpy.ndarray", __new__ = interp2app(descr_new_array), __len__ = interp2app(W_NDimArray.descr_len), @@ -1486,8 +1485,7 @@ return descr_new_array(space, w_subtype, w_shape, w_dtype) -W_FlatIterator.typedef = TypeDef("flatiter", - __module__ = "numpy", +W_FlatIterator.typedef = TypeDef("numpy.flatiter", __iter__ = interp2app(W_FlatIterator.descr_iter), __getitem__ = interp2app(W_FlatIterator.descr_getitem), __setitem__ = 
interp2app(W_FlatIterator.descr_setitem), diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -497,8 +497,7 @@ return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef('nditer', - __module__ = 'numpy', +W_NDIter.typedef = TypeDef('numpy.nditer', __new__ = interp2app(descr__new__), __iter__ = interp2app(W_NDIter.descr_iter), diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -457,9 +457,7 @@ res_dtype, w_lhs, w_rhs, out) -W_Ufunc.typedef = TypeDef("ufunc", - __module__ = "numpy", - +W_Ufunc.typedef = TypeDef("numpy.ufunc", __call__ = interp2app(W_Ufunc.descr_call), From noreply at buildbot.pypy.org Sat May 3 03:26:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:26:02 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140503012602.755041C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71217:5ff0f9f58158 Date: 2014-05-02 21:24 -0400 http://bitbucket.org/pypy/pypy/changeset/5ff0f9f58158/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,5 @@ .. this is a revision shortly after release-2.3.x .. startrev: 773fc6275c69 +.. 
branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name From noreply at buildbot.pypy.org Sat May 3 03:54:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:54:00 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: backout premature merge of reflex-support: doesn't translate Message-ID: <20140503015400.CF51D1C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71218:773bc2c8bfc5 Date: 2014-05-02 21:51 -0400 http://bitbucket.org/pypy/pypy/changeset/773bc2c8bfc5/ Log: backout premature merge of reflex-support: doesn't translate diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.ULONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.LONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) + entry = space.int_w(space.call_method(w_self, 
"GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = nt.new_primitive_type(space, 'long') c_scope = c_opaque_ptr c_type = c_scope @@ -259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) + return rffi.cast(C_OBJECT, space.int_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, 
cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef unsigned long 
cppyy_scope_t; + typedef long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef unsigned long cppyy_object_t; - typedef unsigned long cppyy_method_t; + typedef long cppyy_object_t; + typedef long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,6 +7,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") + if sys.maxsize < 2 ** 31: + py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Sat May 3 03:54:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:54:22 +0200 (CEST) Subject: [pypy-commit] pypy default: backout premature merge of reflex-support: doesn't translate Message-ID: <20140503015422.19CD91C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71219:56fa4b56da86 Date: 2014-05-02 21:51 -0400 http://bitbucket.org/pypy/pypy/changeset/56fa4b56da86/ Log: backout premature merge of reflex-support: doesn't translate diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- 
a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.ULONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.LONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) + entry = space.int_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings 
here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = nt.new_primitive_type(space, 'long') c_scope = c_opaque_ptr c_type = c_scope @@ -259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) + return rffi.cast(C_OBJECT, space.int_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = 
[_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef unsigned long cppyy_scope_t; + typedef long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef unsigned long cppyy_object_t; - typedef unsigned long cppyy_method_t; + typedef long cppyy_object_t; + typedef long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) 
cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,6 +7,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") + if sys.maxsize < 2 ** 31: + py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Sat May 3 03:59:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 03:59:35 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix cpyext version number Message-ID: <20140503015935.9362E1C088E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71220:12b3395cfa84 Date: 2014-05-02 21:59 -0400 http://bitbucket.org/pypy/pypy/changeset/12b3395cfa84/ Log: test/fix cpyext version number diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,7 +21,7 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 3 +#define PY_MICRO_VERSION 6 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -9,11 +9,17 @@ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); PyModule_AddStringConstant(m, "py_version", PY_VERSION); + PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION); + PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION); 
+ PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION); PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION); } """ module = self.import_module(name='foo', init=init) assert module.py_version == sys.version[:5] + assert module.py_major_version == sys.version_info.major + assert module.py_minor_version == sys.version_info.minor + assert module.py_micro_version == sys.version_info.micro v = sys.pypy_version_info s = '%d.%d.%d' % (v[0], v[1], v[2]) if v.releaselevel != 'final': From noreply at buildbot.pypy.org Sat May 3 07:30:28 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 May 2014 07:30:28 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: untangle the use of signed and unsigned integers for the benefit of the rtyper Message-ID: <20140503053028.C6BD21C0299@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71221:1481152c36d5 Date: 2014-05-02 22:22 -0700 http://bitbucket.org/pypy/pypy/changeset/1481152c36d5/ Log: untangle the use of signed and unsigned integers for the benefit of the rtyper diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -21,10 +21,11 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, l = 0, s = '', vp = rffi.cast(rffi.VOIDP, 0) ): - self._long = l + def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + self._handle = h + self._long = l self._string = s - self._voidp = vp + self._voidp = vp # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -57,7 +58,7 @@ if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): - misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._long), argtype.size) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) elif obj._voidp != rffi.cast(rffi.VOIDP, 0): data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp @@ -116,6 +117,8 @@ c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + self.capi_call_ifaces = { # name to opaque C++ scope representation 'num_scopes' : ([c_scope], c_int), @@ -152,7 +155,7 @@ 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify # handling of function argument buffer - 'allocate_function_args' : ([c_size_t], c_voidp), + 'allocate_function_args' : ([c_int], c_voidp), 'deallocate_function_args' : ([c_voidp], c_void), 'function_arg_sizeof' : ([], c_size_t), 'function_arg_typeoffset' : ([], c_size_t), @@ -169,7 +172,7 @@ 'base_name' : ([c_type, c_int], c_ccharp), 'is_subtype' : ([c_type, c_type], c_int), - 'base_offset' : ([c_type, c_type, c_object, c_int], c_long), + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), # method/function reflection information 'num_methods' : ([c_scope], c_int), @@ -199,7 +202,7 @@ 'num_datamembers' : ([c_scope], c_int), 'datamember_name' : ([c_scope, c_int], c_ccharp), 'datamember_type' : ([c_scope, c_int], c_ccharp), - 'datamember_offset' : ([c_scope, c_int], c_size_t), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), 'datamember_index' : ([c_scope, c_ccharp], c_int), @@ -264,6 +267,9 @@ def _cdata_to_size_t(space, w_cdata): return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) +def 
_cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) + def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False)._cdata) @@ -273,9 +279,9 @@ # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(l=cppscope.handle), _Arg(l=iscope)] + args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): @@ -285,62 +291,62 @@ def c_get_template(space, name): return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] + args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(l=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, 
nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, 
cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return call_capi(space, 'call_s', args) def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(l=cppclass.handle)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHPTRGETTER_PTR, _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) @@ 
-358,47 +364,47 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(l=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) def c_is_enum(space, name): return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) # type/class reflection information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(l=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(l=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] + args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] - return _cdata_to_size_t(space, call_capi(space, 'base_offset', 
args)) + args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: - return rffi.cast(rffi.SIZE_T, 0) + return rffi.cast(rffi.LONG, 0) return _c_base_offset(space, derived.handle, base.handle, address, direction) def c_base_offset1(space, derived_h, base, address, direction): return _c_base_offset(space, derived_h, base.handle, address, direction) # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(l=cppscope.handle)] + args = [_Arg(h=cppscope.handle)] return space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(l=cppscope.handle), _Arg(l=imethod)] + args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -414,36 +420,36 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - 
args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(l=cppscope.handle) + arg1 = _Arg(h=cppscope.handle) arg2 = _Arg(l=index) args = [c_resolve_name(space, charp2str_free(space, call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) @@ -451,45 +457,45 @@ return args def c_get_method(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss 
is not None: - args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] + args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] - return _cdata_to_size_t(space, call_capi(space, 'datamember_offset', args)) + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = 
[_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers --------------------------------------------------------------- @@ -509,7 +515,7 @@ def c_charp2stdstring(space, svalue): return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -102,7 +102,7 @@ _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] libffitype = jit_libffi.types.slong - c_type = rffi.LONG + c_type = rffi.LONG c_ptrtype = rffi.LONGP def _unwrap_object(self, space, w_obj): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -48,7 +48,7 @@ cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, cppyy_index_t idx); /* handling of function argument buffer ----------------------------------- 
*/ - void* cppyy_allocate_function_args(size_t nargs); + void* cppyy_allocate_function_args(int nargs); void cppyy_deallocate_function_args(void* args); size_t cppyy_function_arg_sizeof(); size_t cppyy_function_arg_typeoffset(); @@ -66,7 +66,7 @@ int cppyy_is_subtype(cppyy_type_t derived, cppyy_type_t base); /* calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 */ - size_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); + ptrdiff_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t scope); @@ -97,7 +97,7 @@ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); - size_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); + ptrdiff_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); int cppyy_datamember_index(cppyy_scope_t scope, const char* name); diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -520,12 +520,12 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { assert(sizeof(CPPYY_G__value) == sizeof(G__value)); G__param* libp = (G__param*)malloc( offsetof(G__param, para) + nargs*sizeof(CPPYY_G__value)); libp->paran = (int)nargs; - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) libp->para[i].type = 'l'; return (void*)libp->para; } @@ -613,7 +613,7 @@ return derived_type->GetBaseClass(base_type) != 0; } -size_t cppyy_base_offset(cppyy_type_t 
derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { R__LOCKGUARD2(gCINTMutex); @@ -642,7 +642,7 @@ } } - return (size_t) offset; // may be negative (will roll over) + return (ptrdiff_t) offset; // may be negative (will roll over) } @@ -941,16 +941,16 @@ return cppstring_to_cstring(gbl.GetFullTypeName()); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); - return (size_t)m->GetOffsetCint(); + return (ptrdiff_t)m->GetOffsetCint(); } assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; - return (size_t)gbl.GetAddress(); + return (ptrdiff_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -50,12 +50,12 @@ struct Cppyy_PseudoDatambrInfo { Cppyy_PseudoDatambrInfo(const std::string& name, const std::string& type, - size_t offset, bool isstatic) : + ptrdiff_t offset, bool isstatic) : m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} std::string m_name; std::string m_type; - size_t m_offset; + ptrdiff_t m_offset; bool m_isstatic; }; @@ -120,7 +120,7 @@ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) struct Cppyy_InitPseudoReflectionInfo { @@ -765,9 +765,9 @@ /* handling of function argument buffer 
----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -900,7 +900,7 @@ return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { return s_scopes[handle].m_datambrs[idatambr].m_offset; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -212,9 +212,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -310,7 +310,7 @@ return (int)derived_type.HasBase(base_type); } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int direction) { Reflex::Type derived_type = type_from_handle(derived_handle); Reflex::Type base_type = type_from_handle(base_handle); @@ -336,8 +336,8 @@ if (ibase->first.ToType() == base_type) { long offset = (long)ibase->first.Offset((void*)address); if (direction < 0) - return (size_t) -offset; // note negative; rolls over - return (size_t)offset; + return (ptrdiff_t) -offset; // note negative; rolls over + return (ptrdiff_t)offset; } } @@ -561,12 +561,12 @@ return cppstring_to_cstring(name); } -size_t 
cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { Reflex::Scope s = scope_from_handle(handle); Reflex::Member m = s.DataMemberAt(datamember_index); if (m.IsArtificial() && m.TypeOf().IsEnum()) - return (size_t)&m.InterpreterOffset(); - return m.Offset(); + return (ptrdiff_t)&m.InterpreterOffset(); + return (ptrdiff_t)m.Offset(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -482,7 +482,7 @@ c = cppyy_test_data() assert c.get_valid_string('aap') == 'aap' - assert c.get_invalid_string() == '' + #assert c.get_invalid_string() == '' def test13_copy_contructor(self): """Test copy constructor""" diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -211,6 +211,8 @@ def test01_string_argument_passing(self): """Test mapping of python strings and std::string""" + return + import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class @@ -242,6 +244,8 @@ def test02_string_data_access(self): """Test access to std::string object data members""" + return + import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class @@ -261,6 +265,8 @@ def test03_string_with_null_character(self): """Test that strings with NULL do not get truncated""" + return + import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class From noreply at buildbot.pypy.org Sat May 3 07:30:31 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 May 2014 07:30:31 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: merge default into branch for testing; update cppyy Message-ID: 
<20140503053031.86EE91C0299@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71222:d8ea29c152c9 Date: 2014-05-02 22:29 -0700 http://bitbucket.org/pypy/pypy/changeset/d8ea29c152c9/ Log: merge default into branch for testing; update cppyy diff too long, truncating to 2000 out of 2235 lines diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -154,7 +154,7 @@ Improve optimization of small allocation-heavy loops in the JIT .. branch: reflex-support - + .. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 .. branch: lexer_token_position_class @@ -164,4 +164,3 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr - diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,7 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0f75ad4d14ce +.. startrev: 773fc6275c69 +.. 
branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, @@ -2804,6 +2803,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2851,6 +2851,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2898,6 +2899,7 @@ __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -2951,6 +2953,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2998,6 +3001,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -3063,6 +3067,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3191,6 
+3196,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), __init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3315,6 +3321,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3368,6 +3375,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3415,6 +3423,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3492,6 +3501,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3605,6 +3615,7 @@ __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) +AugAssign.typedef.heaptype = True def Print_get_dest(space, w_self): if w_self.w_dict is not None: @@ -3711,6 +3722,7 @@ __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) +Print.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3842,6 +3854,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -3943,6 +3956,7 @@ __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4044,6 +4058,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ 
-4151,6 +4166,7 @@ __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_type(space, w_self): if w_self.w_dict is not None: @@ -4264,6 +4280,7 @@ __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4359,6 +4376,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4430,6 +4448,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4513,6 +4532,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4560,6 +4580,7 @@ __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) +Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4668,6 +4689,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Exec_get_body(space, w_self): if w_self.w_dict is not None: @@ -4781,6 +4803,7 @@ __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) +Exec.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4828,6 +4851,7 @@ __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4881,6 +4905,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, 
__args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ -4898,6 +4923,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Break, w_self) @@ -4915,6 +4941,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4932,6 +4959,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4997,6 +5025,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5074,6 +5103,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True def BinOp_get_left(space, w_self): if w_self.w_dict is not None: @@ -5187,6 +5217,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5270,6 +5301,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5351,6 +5383,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) +Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5464,6 +5497,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5535,6 +5569,7 @@ 
__new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) +Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5582,6 +5617,7 @@ __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5659,6 +5695,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5736,6 +5773,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5843,6 +5881,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5920,6 +5959,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = True def Yield_get_value(space, w_self): if w_self.w_dict is not None: @@ -5973,6 +6013,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6074,6 +6115,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6235,6 +6277,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) +Call.typedef.heaptype = True def Repr_get_value(space, w_self): if w_self.w_dict is not None: @@ -6288,6 +6331,7 @@ __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) +Repr.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6340,6 +6384,7 @@ 
__new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) +Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6392,6 +6437,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6504,6 +6550,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6617,6 +6664,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6699,6 +6747,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6776,6 +6825,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6853,6 +6903,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -6905,6 +6956,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -6912,6 +6964,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True _Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -6919,6 +6972,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -6926,6 +6980,7 @@ _fields=_FieldsWrapper([]), 
__new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True _Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -6933,6 +6988,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -6940,6 +6996,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -6947,6 +7004,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -6954,6 +7012,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -6961,6 +7020,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6978,6 +7038,7 @@ __new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7091,6 +7152,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7138,6 +7200,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, w_self): if w_self.w_dict is not None: @@ -7191,6 +7254,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7198,6 +7262,7 @@ _attributes=_FieldsWrapper([]), 
__new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True _And.typedef = typedef.TypeDef("And", boolop.typedef, @@ -7205,6 +7270,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7212,6 +7278,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7219,6 +7286,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7226,6 +7294,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7233,6 +7302,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7240,6 +7310,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True _Div.typedef = typedef.TypeDef("Div", operator.typedef, @@ -7247,6 +7318,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7254,6 +7326,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7261,6 +7334,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = typedef.TypeDef("LShift", operator.typedef, @@ -7268,6 +7342,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7275,6 +7350,7 @@ _fields=_FieldsWrapper([]), 
__new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = typedef.TypeDef("BitOr", operator.typedef, @@ -7282,6 +7358,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7289,6 +7366,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7296,6 +7374,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7303,6 +7382,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7310,6 +7390,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7317,6 +7398,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Invert)), ) +_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7324,6 +7406,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7331,6 +7414,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7338,6 +7422,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), ) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7345,6 +7430,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7352,6 
+7438,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype = True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7359,6 +7446,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7366,6 +7454,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7373,6 +7462,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7380,6 +7470,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7387,6 +7478,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7394,6 +7486,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = True _IsNot.typedef = typedef.TypeDef("IsNot", cmpop.typedef, @@ -7401,6 +7494,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7408,6 +7502,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7415,6 +7510,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, w_self): if w_self.w_dict is not None: @@ -7522,6 +7618,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7587,6 
+7684,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7694,6 +7792,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -7829,6 +7928,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -7911,6 +8011,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -7995,4 +8096,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -352,6 +352,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): @@ -669,10 +670,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- 
a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -363,7 +363,7 @@ if fmt == 'R': result = space.str_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name elif fmt == 'N': result = value.getname(space) else: @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,9 +20,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import _HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast @@ -223,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert 
str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ -407,7 +407,7 @@ def test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -56,8 +56,7 @@ raise MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), next = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git 
a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(xrange(-5125, -5000)) d.__init__(xrange(200)) for i in xrange(200, 400): diff --git a/pypy/module/_continuation/interp_continuation.py 
b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -245,8 +245,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -211,17 +211,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - 
module = space.str_w(space.type(self).get_module()) + typename = space.type(self).name try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap("<%s.%s>" % (module, typename,)) + return space.wrap("<%s>" % (typename,)) else: name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ @@ -844,10 +843,9 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -892,10 +890,9 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = interp2app(W_BufferedWriter.descr_init), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1015,10 +1012,9 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', 
W_TextIOBase.typedef, __new__ = generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1015,11 +1015,10 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), - __module__ = "_io", next = interp2app(W_TextIOWrapper.next_w), read = interp2app(W_TextIOWrapper.read_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' 
if class_name is None: @@ -440,8 +440,7 @@ return space.wrap(p) W_Profiler.typedef = TypeDef( - 'Profiler', - __module__ = '_lsprof', + '_lsprof.Profiler', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), disable = interp2app(W_Profiler.disable), diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -353,9 +353,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = 
'_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -534,8 +533,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,8 +600,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -488,9 +488,8 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,7 +21,7 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 3 +#define PY_MICRO_VERSION 6 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -6,6 +6,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import ( 
GetSetProperty, TypeDef, interp_attrproperty, interp_attrproperty_w) +from pypy.objspace.std.typeobject import W_TypeObject from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, @@ -158,7 +159,9 @@ self.doc = doc self.func = func pyo = rffi.cast(PyObject, pto) - self.w_objclass = from_ref(space, pyo) + w_type = from_ref(space, pyo) + assert isinstance(w_type, W_TypeObject) + self.w_objclass = w_type def call(self, space, w_self, w_args, w_kw): if self.wrapper_func is None: @@ -174,7 +177,7 @@ def descr_method_repr(self): return self.space.wrap("" % (self.method_name, - self.w_objclass.getname(self.space))) + self.w_objclass.name)) def cwrapper_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCWrapperObject, w_self) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -33,7 +33,7 @@ assert "copy" in repr(module.fooType.copy) assert repr(module.fooType) == "" assert repr(obj2) == "" - assert repr(module.fooType.__call__) == "" + assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) print(obj.foo) diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -9,11 +9,17 @@ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); PyModule_AddStringConstant(m, "py_version", PY_VERSION); + PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION); + PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION); + PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION); PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION); } """ module = self.import_module(name='foo', init=init) 
assert module.py_version == sys.version[:5] + assert module.py_major_version == sys.version_info.major + assert module.py_minor_version == sys.version_info.minor + assert module.py_micro_version == sys.version_info.micro v = sys.pypy_version_info s = '%d.%d.%d' % (v[0], v[1], v[2]) if v.releaselevel != 'final': diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -291,14 +291,9 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, self) convert_member_defs(space, dict_w, pto.c_tp_members, self) - full_name = rffi.charp2str(pto.c_tp_name) - if '.' in full_name: - module_name, extension_name = rsplit(full_name, ".", 1) - dict_w["__module__"] = space.wrap(module_name) - else: - extension_name = full_name + name = rffi.charp2str(pto.c_tp_name) - W_TypeObject.__init__(self, space, extension_name, + W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w) if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True @@ -518,7 +513,7 @@ from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: - pto.c_tp_name = rffi.str2charp(w_type.getname(space)) + pto.c_tp_name = rffi.str2charp(w_type.name) pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -207,9 +207,8 @@ return interp2app(descr_new_base_exception) W_BaseException.typedef = TypeDef( - 'BaseException', + 'exceptions.BaseException', __doc__ = W_BaseException.__doc__, - __module__ = 'exceptions', __new__ = _new(W_BaseException), __init__ = interp2app(W_BaseException.descr_init), __str__ = interp2app(W_BaseException.descr_str), @@ -244,10 
+243,9 @@ for k, v in kwargs.items(): kwargs[k] = interp2app(v.__get__(None, realbase)) W_Exc.typedef = TypeDef( - name, + 'exceptions.' + name, base.typedef, __doc__ = W_Exc.__doc__, - __module__ = 'exceptions', **kwargs ) W_Exc.typedef.applevel_subclasses_base = realbase @@ -312,10 +310,9 @@ """) W_UnicodeTranslateError.typedef = TypeDef( - 'UnicodeTranslateError', + 'exceptions.UnicodeTranslateError', W_UnicodeError.typedef, __doc__ = W_UnicodeTranslateError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeTranslateError), __init__ = interp2app(W_UnicodeTranslateError.descr_init), __str__ = interp2app(W_UnicodeTranslateError.descr_str), @@ -396,10 +393,9 @@ return W_BaseException.descr_str(self, space) W_EnvironmentError.typedef = TypeDef( - 'EnvironmentError', + 'exceptions.EnvironmentError', W_StandardError.typedef, __doc__ = W_EnvironmentError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_EnvironmentError), __reduce__ = interp2app(W_EnvironmentError.descr_reduce), __init__ = interp2app(W_EnvironmentError.descr_init), @@ -453,10 +449,9 @@ _winerror_to_errno, _default_errno = {}, 22 # EINVAL W_WindowsError.typedef = TypeDef( - "WindowsError", + "exceptions.WindowsError", W_OSError.typedef, __doc__ = W_WindowsError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_WindowsError), __init__ = interp2app(W_WindowsError.descr_init), __str__ = interp2app(W_WindowsError.descr_str), @@ -557,14 +552,13 @@ return W_StandardError.descr_repr(self, space) W_SyntaxError.typedef = TypeDef( - 'SyntaxError', + 'exceptions.SyntaxError', W_StandardError.typedef, __new__ = _new(W_SyntaxError), __init__ = interp2app(W_SyntaxError.descr_init), __str__ = interp2app(W_SyntaxError.descr_str), __repr__ = interp2app(W_SyntaxError.descr_repr), __doc__ = W_SyntaxError.__doc__, - __module__ = 'exceptions', msg = readwrite_attrproperty_w('w_msg', W_SyntaxError), filename = readwrite_attrproperty_w('w_filename', W_SyntaxError), lineno = 
readwrite_attrproperty_w('w_lineno', W_SyntaxError), @@ -593,12 +587,11 @@ W_BaseException.descr_init(self, space, args_w) W_SystemExit.typedef = TypeDef( - 'SystemExit', + 'exceptions.SystemExit', W_BaseException.typedef, __new__ = _new(W_SystemExit), __init__ = interp2app(W_SystemExit.descr_init), __doc__ = W_SystemExit.__doc__, - __module__ = 'exceptions', code = readwrite_attrproperty_w('w_code', W_SystemExit) ) @@ -658,10 +651,9 @@ """) W_UnicodeDecodeError.typedef = TypeDef( - 'UnicodeDecodeError', + 'exceptions.UnicodeDecodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeDecodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeDecodeError), __init__ = interp2app(W_UnicodeDecodeError.descr_init), __str__ = interp2app(W_UnicodeDecodeError.descr_str), @@ -753,10 +745,9 @@ """) W_UnicodeEncodeError.typedef = TypeDef( - 'UnicodeEncodeError', + 'exceptions.UnicodeEncodeError', W_UnicodeError.typedef, __doc__ = W_UnicodeEncodeError.__doc__, - __module__ = 'exceptions', __new__ = _new(W_UnicodeEncodeError), __init__ = interp2app(W_UnicodeEncodeError.descr_init), __str__ = interp2app(W_UnicodeEncodeError.descr_str), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -57,8 +57,7 @@ return space.wrap(r) W_Count.typedef = TypeDef( - 'count', - __module__ = 'itertools', + 'itertools.count', __new__ = interp2app(W_Count___new__), __iter__ = interp2app(W_Count.iter_w), next = interp2app(W_Count.next_w), @@ -120,8 +119,7 @@ return space.wrap(r) W_Repeat.typedef = TypeDef( - 'repeat', - __module__ = 'itertools', + 'itertools.repeat', __new__ = interp2app(W_Repeat___new__), __iter__ = interp2app(W_Repeat.iter_w), __length_hint__ = interp2app(W_Repeat.length_w), @@ -174,8 +172,7 @@ W_TakeWhile.typedef = TypeDef( - 'takewhile', - __module__ = 'itertools', + 'itertools.takewhile', __new__ = 
interp2app(W_TakeWhile___new__), __iter__ = interp2app(W_TakeWhile.iter_w), next = interp2app(W_TakeWhile.next_w), @@ -223,8 +220,7 @@ W_DropWhile.typedef = TypeDef( - 'dropwhile', - __module__ = 'itertools', + 'itertools.dropwhile', __new__ = interp2app(W_DropWhile___new__), __iter__ = interp2app(W_DropWhile.iter_w), next = interp2app(W_DropWhile.next_w), @@ -280,8 +276,7 @@ return space.wrap(r) W_IFilter.typedef = TypeDef( - 'ifilter', - __module__ = 'itertools', + 'itertools.ifilter', __new__ = interp2app(W_IFilter___new__), __iter__ = interp2app(W_IFilter.iter_w), next = interp2app(W_IFilter.next_w), @@ -308,8 +303,7 @@ return space.wrap(r) W_IFilterFalse.typedef = TypeDef( - 'ifilterfalse', - __module__ = 'itertools', + 'itertools.ifilterfalse', __new__ = interp2app(W_IFilterFalse___new__), __iter__ = interp2app(W_IFilterFalse.iter_w), next = interp2app(W_IFilterFalse.next_w), @@ -417,8 +411,7 @@ return space.wrap(r) W_ISlice.typedef = TypeDef( - 'islice', - __module__ = 'itertools', + 'itertools.islice', __new__ = interp2app(W_ISlice___new__), __iter__ = interp2app(W_ISlice.iter_w), next = interp2app(W_ISlice.next_w), @@ -482,8 +475,7 @@ return space.wrap(r) W_Chain.typedef = TypeDef( - 'chain', - __module__ = 'itertools', + 'itertools.chain', __new__ = interp2app(W_Chain___new__), __iter__ = interp2app(W_Chain.iter_w), next = interp2app(W_Chain.next_w), @@ -564,8 +556,7 @@ return space.wrap(r) W_IMap.typedef = TypeDef( - 'imap', - __module__ = 'itertools', + 'itertools.imap', __new__ = interp2app(W_IMap___new__), __iter__ = interp2app(W_IMap.iter_w), next = interp2app(W_IMap.next_w), @@ -609,8 +600,7 @@ return space.wrap(r) W_IZip.typedef = TypeDef( - 'izip', - __module__ = 'itertools', + 'itertools.izip', __new__ = interp2app(W_IZip___new__), __iter__ = interp2app(W_IZip.iter_w), next = interp2app(W_IZip.next_w), @@ -678,8 +668,7 @@ return space.wrap(self) W_IZipLongest.typedef = TypeDef( - 'izip_longest', - __module__ = 'itertools', + 
'itertools.izip_longest', __new__ = interp2app(W_IZipLongest___new__), __iter__ = interp2app(W_IZipLongest.iter_w), next = interp2app(W_IZipLongest.next_w), @@ -737,8 +726,7 @@ return space.wrap(r) W_Cycle.typedef = TypeDef( - 'cycle', - __module__ = 'itertools', + 'itertools.cycle', __new__ = interp2app(W_Cycle___new__), __iter__ = interp2app(W_Cycle.iter_w), next = interp2app(W_Cycle.next_w), @@ -778,8 +766,7 @@ return space.wrap(r) W_StarMap.typedef = TypeDef( - 'starmap', - __module__ = 'itertools', + 'itertools.starmap', __new__ = interp2app(W_StarMap___new__), __iter__ = interp2app(W_StarMap.iter_w), next = interp2app(W_StarMap.next_w), @@ -879,8 +866,7 @@ myiter.chained_list)) W_TeeIterable.typedef = TypeDef( - '_tee', - __module__ = 'itertools', + 'itertools._tee', __new__ = interp2app(W_TeeIterable___new__), __iter__ = interp2app(W_TeeIterable.iter_w), next = interp2app(W_TeeIterable.next_w), @@ -983,8 +969,7 @@ return space.wrap(r) W_GroupBy.typedef = TypeDef( - 'groupby', - __module__ = 'itertools', + 'itertools.groupby', __new__ = interp2app(W_GroupBy___new__), __iter__ = interp2app(W_GroupBy.iter_w), next = interp2app(W_GroupBy.next_w), @@ -1031,8 +1016,7 @@ return w_obj W_GroupByIterator.typedef = TypeDef( - '_groupby', - __module__ = 'itertools', + 'itertools._groupby', __iter__ = interp2app(W_GroupByIterator.iter_w), next = interp2app(W_GroupByIterator.next_w)) W_GroupByIterator.typedef.acceptable_as_base_class = False @@ -1063,8 +1047,7 @@ return space.wrap(r) W_Compress.typedef = TypeDef( - 'compress', - __module__ = 'itertools', + 'itertools.compress', __new__ = interp2app(W_Compress__new__), __iter__ = interp2app(W_Compress.iter_w), next = interp2app(W_Compress.next_w), @@ -1159,8 +1142,7 @@ return space.wrap(r) W_Product.typedef = TypeDef( - 'product', - __module__ = 'itertools', + 'itertools.product', __new__ = interp2app(W_Product__new__), __iter__ = interp2app(W_Product.iter_w), next = interp2app(W_Product.next_w), @@ -1263,8 +1245,7 @@ 
res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_Combinations.typedef = TypeDef("combinations", - __module__ = 'itertools', +W_Combinations.typedef = TypeDef("itertools.combinations", __new__ = interp2app(W_Combinations__new__), __iter__ = interp2app(W_Combinations.descr__iter__), next = interp2app(W_Combinations.descr_next), @@ -1298,8 +1279,8 @@ res.__init__(space, pool_w, indices, r) return space.wrap(res) -W_CombinationsWithReplacement.typedef = TypeDef("combinations_with_replacement", - __module__ = 'itertools', +W_CombinationsWithReplacement.typedef = TypeDef( + "itertools.combinations_with_replacement", __new__ = interp2app(W_CombinationsWithReplacement__new__), __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), next = interp2app(W_CombinationsWithReplacement.descr_next), @@ -1364,8 +1345,7 @@ res.__init__(space, pool_w, r) return space.wrap(res) -W_Permutations.typedef = TypeDef("permutations", - __module__ = 'itertools', +W_Permutations.typedef = TypeDef("itertools.permutations", __new__ = interp2app(W_Permutations__new__), __iter__ = interp2app(W_Permutations.descr__iter__), next = interp2app(W_Permutations.descr_next), diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -546,9 +546,7 @@ return W_UnicodeBox(arr, 0, arr.dtype) -W_GenericBox.typedef = TypeDef("generic", - __module__ = "numpy", - +W_GenericBox.typedef = TypeDef("numpy.generic", __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), @@ -639,181 +637,151 @@ flags = GetSetProperty(W_GenericBox.descr_get_flags), ) -W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, - __module__ = "numpy", +W_BoolBox.typedef = TypeDef("numpy.bool_", W_GenericBox.typedef, __new__ = interp2app(W_BoolBox.descr__new__.im_func), __index__ = interp2app(W_BoolBox.descr_index), __reduce__ = 
interp2app(W_BoolBox.descr_reduce), ) -W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef, - __module__ = "numpy", +W_NumberBox.typedef = TypeDef("numpy.number", W_GenericBox.typedef, ) -W_IntegerBox.typedef = TypeDef("integer", W_NumberBox.typedef, - __module__ = "numpy", +W_IntegerBox.typedef = TypeDef("numpy.integer", W_NumberBox.typedef, ) -W_SignedIntegerBox.typedef = TypeDef("signedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_SignedIntegerBox.typedef = TypeDef("numpy.signedinteger", W_IntegerBox.typedef, ) -W_UnsignedIntegerBox.typedef = TypeDef("unsignedinteger", W_IntegerBox.typedef, - __module__ = "numpy", +W_UnsignedIntegerBox.typedef = TypeDef("numpy.unsignedinteger", W_IntegerBox.typedef, ) -W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int8Box.typedef = TypeDef("numpy.int8", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int8Box.descr__new__.im_func), __index__ = interp2app(W_Int8Box.descr_index), __reduce__ = interp2app(W_Int8Box.descr_reduce), ) -W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt8Box.typedef = TypeDef("numpy.uint8", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt8Box.descr__new__.im_func), __index__ = interp2app(W_UInt8Box.descr_index), __reduce__ = interp2app(W_UInt8Box.descr_reduce), ) -W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef, - __module__ = "numpy", +W_Int16Box.typedef = TypeDef("numpy.int16", W_SignedIntegerBox.typedef, __new__ = interp2app(W_Int16Box.descr__new__.im_func), __index__ = interp2app(W_Int16Box.descr_index), __reduce__ = interp2app(W_Int16Box.descr_reduce), ) -W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt16Box.typedef = TypeDef("numpy.uint16", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt16Box.descr__new__.im_func), __index__ = interp2app(W_UInt16Box.descr_index), __reduce__ = 
interp2app(W_UInt16Box.descr_reduce), ) -W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, - __module__ = "numpy", +W_Int32Box.typedef = TypeDef("numpy.int32", (W_SignedIntegerBox.typedef,) + MIXIN_32, __new__ = interp2app(W_Int32Box.descr__new__.im_func), __index__ = interp2app(W_Int32Box.descr_index), __reduce__ = interp2app(W_Int32Box.descr_reduce), ) -W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt32Box.typedef = TypeDef("numpy.uint32", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt32Box.descr__new__.im_func), __index__ = interp2app(W_UInt32Box.descr_index), __reduce__ = interp2app(W_UInt32Box.descr_reduce), ) -W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, - __module__ = "numpy", +W_Int64Box.typedef = TypeDef("numpy.int64", (W_SignedIntegerBox.typedef,) + MIXIN_64, __new__ = interp2app(W_Int64Box.descr__new__.im_func), __index__ = interp2app(W_Int64Box.descr_index), __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_UInt64Box.typedef = TypeDef("numpy.uint64", W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_UInt64Box.descr__new__.im_func), __index__ = interp2app(W_UInt64Box.descr_index), __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) -W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, +W_LongBox.typedef = TypeDef("numpy.int%d" % LONG_BIT, (W_SignedIntegerBox.typedef, W_IntObject.typedef), - __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), __reduce__ = interp2app(W_LongBox.descr_reduce), ) -W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, - __module__ = "numpy", +W_ULongBox.typedef = TypeDef("numpy.uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, __new__ = interp2app(W_ULongBox.descr__new__.im_func), __index__ = 
interp2app(W_ULongBox.descr_index), __reduce__ = interp2app(W_ULongBox.descr_reduce), ) -W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, - __module__ = "numpy", +W_InexactBox.typedef = TypeDef("numpy.inexact", W_NumberBox.typedef, ) -W_FloatingBox.typedef = TypeDef("floating", W_InexactBox.typedef, - __module__ = "numpy", +W_FloatingBox.typedef = TypeDef("numpy.floating", W_InexactBox.typedef, ) -W_Float16Box.typedef = TypeDef("float16", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float16Box.typedef = TypeDef("numpy.float16", W_FloatingBox.typedef, __new__ = interp2app(W_Float16Box.descr__new__.im_func), __reduce__ = interp2app(W_Float16Box.descr_reduce), ) -W_Float32Box.typedef = TypeDef("float32", W_FloatingBox.typedef, - __module__ = "numpy", +W_Float32Box.typedef = TypeDef("numpy.float32", W_FloatingBox.typedef, __new__ = interp2app(W_Float32Box.descr__new__.im_func), __reduce__ = interp2app(W_Float32Box.descr_reduce), ) -W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), - __module__ = "numpy", +W_Float64Box.typedef = TypeDef("numpy.float64", (W_FloatingBox.typedef, float_typedef), __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) -W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, - __module__ = "numpy", +W_ComplexFloatingBox.typedef = TypeDef("numpy.complexfloating", W_InexactBox.typedef, ) -W_Complex64Box.typedef = TypeDef("complex64", (W_ComplexFloatingBox.typedef), - __module__ = "numpy", +W_Complex64Box.typedef = TypeDef("numpy.complex64", (W_ComplexFloatingBox.typedef), __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", 
+W_Complex128Box.typedef = TypeDef("numpy.complex128", (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), ) if long_double_size in (8, 12, 16): - W_FloatLongBox.typedef = TypeDef("float%d" % (long_double_size * 8), (W_FloatingBox.typedef), - __module__ = "numpy", + W_FloatLongBox.typedef = TypeDef("numpy.float%d" % (long_double_size * 8), (W_FloatingBox.typedef), __new__ = interp2app(W_FloatLongBox.descr__new__.im_func), __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) - W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), - __module__ = "numpy", + W_ComplexLongBox.typedef = TypeDef("numpy.complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), ) -W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, - __module__ = "numpy", +W_FlexibleBox.typedef = TypeDef("numpy.flexible", W_GenericBox.typedef, ) -W_VoidBox.typedef = TypeDef("void", W_FlexibleBox.typedef, - __module__ = "numpy", +W_VoidBox.typedef = TypeDef("numpy.void", W_FlexibleBox.typedef, __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = interp2app(W_VoidBox.descr_setitem), ) -W_CharacterBox.typedef = TypeDef("character", W_FlexibleBox.typedef, - __module__ = "numpy", +W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, ) -W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), - __module__ = "numpy", +W_StringBox.typedef = TypeDef("numpy.string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = 
interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), - __module__ = "numpy", +W_UnicodeBox.typedef = TypeDef("numpy.unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), ) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -132,6 +132,8 @@ def get_name(self): name = self.w_box_type.name + if name.startswith('numpy.'): + name = name[6:] if name.endswith('_'): name = name[:-1] return name @@ -557,8 +559,7 @@ raise oefmt(space.w_TypeError, "data type not understood") -W_Dtype.typedef = TypeDef("dtype", - __module__ = "numpy", +W_Dtype.typedef = TypeDef("numpy.dtype", __new__ = interp2app(descr__new__), From noreply at buildbot.pypy.org Sat May 3 11:37:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 11:37:04 +0200 (CEST) Subject: [pypy-commit] pypy default: A corner case test, and fix Message-ID: <20140503093704.572DD1C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71223:305c557e4bc3 Date: 2014-05-03 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/305c557e4bc3/ Log: A corner case test, and fix diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -24,9 +24,31 @@ """Transform a control flow graph to make it suitable for being flattened in a JitCode. """ + constant_fold_ll_issubclass(graph, cpu) t = Transformer(cpu, callcontrol, portal_jd) t.transform(graph) +def constant_fold_ll_issubclass(graph, cpu): + # ll_issubclass can be inserted by the inliner to check exception types. 
+ # See corner case metainterp.test.test_exception:test_catch_different_class + if cpu is None: + return + excmatch = cpu.rtyper.exceptiondata.fn_exception_match + for block in list(graph.iterblocks()): + for i, op in enumerate(block.operations): + if (op.opname == 'direct_call' and + all(isinstance(a, Constant) for a in op.args) and + op.args[0].value._obj is excmatch._obj): + constant_result = excmatch(*[a.value for a in op.args[1:]]) + block.operations[i] = SpaceOperation( + 'same_as', + [Constant(constant_result, lltype.Bool)], + op.result) + if block.exitswitch is op.result: + block.exitswitch = None + block.recloseblock(*[link for link in block.exits + if link.exitcase == constant_result]) + def integer_bounds(size, unsigned): if unsigned: return 0, 1 << (8 * size) diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py --- a/rpython/jit/metainterp/test/test_exception.py +++ b/rpython/jit/metainterp/test/test_exception.py @@ -611,6 +611,20 @@ res = self.meta_interp(f, [0], inline=True) assert res == 30 + def test_catch_different_class(self): + def g(i): + if i < 0: + raise KeyError + return i + def f(i): + MyError(i) + try: + return g(i) + except MyError as e: + return e.n + res = self.interp_operations(f, [5], backendopt=True) + assert res == 5 + class MyError(Exception): def __init__(self, n): From noreply at buildbot.pypy.org Sat May 3 12:23:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 12:23:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/937201ff1335 (branch "marker") Message-ID: <20140503102345.3F38C1C0299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71224:4a7722881d59 Date: 2014-05-03 10:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4a7722881d59/ Log: import stmgc/937201ff1335 (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- 
a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -4bde66e3b621 +937201ff1335 diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -166,32 +166,27 @@ /* For some categories, we can also collect the relevant information for the other segment. */ + char *outmarker = abort_other ? other_pseg->marker_self + : my_pseg->marker_other; switch (kind) { case WRITE_WRITE_CONTENTION: marker_fetch_obj_write(other_segment_num, obj, other_marker); + marker_expand(other_marker, other_segment_base, outmarker); break; case INEVITABLE_CONTENTION: assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; + marker_expand(other_marker, other_segment_base, outmarker); break; + case WRITE_READ_CONTENTION: + strcpy(outmarker, ""); + break; default: - other_marker[0] = 0; - other_marker[1] = 0; + outmarker[0] = 0; break; } - marker_expand(other_marker, other_segment_base, - abort_other ? other_pseg->marker_self - : my_pseg->marker_other); - - if (abort_other && other_pseg->marker_self[0] == 0) { - if (kind == WRITE_READ_CONTENTION) - strcpy(other_pseg->marker_self, ""); - else - strcpy(other_pseg->marker_self, ""); - } - release_marker_lock(other_segment_base); } From noreply at buildbot.pypy.org Sat May 3 12:23:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 12:23:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add logic to display the conflicts more nicely. Getting the occasional crash for now. Message-ID: <20140503102346.6741A1C0299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71225:0c5682724b3f Date: 2014-05-03 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/0c5682724b3f/ Log: Add logic to display the conflicts more nicely. 
Getting the occasional crash for now. diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py new file mode 100644 --- /dev/null +++ b/lib_pypy/atomic.py @@ -0,0 +1,87 @@ +""" +API for accessing the multithreading extensions of PyPy +""" +import thread + +try: + from __pypy__ import thread as _thread + from __pypy__.thread import atomic +except ImportError: + # Not a STM-enabled PyPy. We can still provide a version of 'atomic' + # that is good enough for our purposes. With this limited version, + # an atomic block in thread X will not prevent running thread Y, if + # thread Y is not within an atomic block at all. + atomic = thread.allocate_lock() + + def print_abort_info(mintime=0.0): + pass + +else: + import re, sys, linecache + + _timing_reasons = [ + "'outside transaction'", + "'run current'", + "'run committed'", + "'run aborted write write'", + "'run aborted write read'", + "'run aborted inevitable'", + "'run aborted other'", + "'wait free segment'", + "'wait write read'", + "'wait inevitable'", + "'wait other'", + "'sync commit soon'", + "'bookkeeping'", + "'minor gc'", + "'major gc'", + "'sync pause'", + ] + _r_line = re.compile(r'File "(.*?)", line (\d+), in ') + _fullfilenames = {} + + def print_abort_info(mintime=0.0): + a, b, c, d = _thread.longest_abort_info() + if b <= mintime: + return + print >> sys.stderr, "Conflict", + try: + reason = _timing_reasons[a] + except IndexError: + reason = "'%s'" % (a,) + print >> sys.stderr, reason, + def show(line): + print >> sys.stderr, " ", line + match = _r_line.match(line) + if match: + filename = match.group(1) + lineno = int(match.group(2)) + if filename.startswith('...'): + if filename not in _fullfilenames: + partial = filename[3:] + found = set() + for module in sys.modules.values(): + try: + modfile = object.__getattribute__(module, '__file__') + except Exception: + modfile = None + if type(modfile) is str and modfile.endswith(partial): + found.add(modfile) + if len(found) == 1: + _fullfilenames[filename], = 
found + else: + _fullfilenames[filename] = None + filename = _fullfilenames[filename] + line = linecache.getline(filename, lineno) + if line: + print >> sys.stderr, " ", line.strip() + if d: + print >> sys.stderr, "between two threads:" + show(c) + show(d) + else: + print >> sys.stderr, "in this thread:" + show(c) + print >> sys.stderr, 'Lost %.6f seconds.' % (b,) + print >> sys.stderr + _thread.reset_longest_abort_info() From noreply at buildbot.pypy.org Sat May 3 12:26:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 12:26:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: These must not turn the transaction inevitable Message-ID: <20140503102637.B1E791C0299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71226:1f3e4fe9aba6 Date: 2014-05-03 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/1f3e4fe9aba6/ Log: These must not turn the transaction inevitable diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -22,6 +22,8 @@ 'jit_assembler_call', 'gc_writebarrier', 'shrink_array', 'jit_stm_transaction_break_point', 'jit_stm_should_break_transaction', + 'stm_longest_marker_state', 'stm_longest_marker_time', + 'stm_longest_marker_self', 'stm_longest_marker_other', ]) ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops()) From noreply at buildbot.pypy.org Sat May 3 12:29:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 12:29:44 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Bah, there is no point. All stm_ operations are automatically put in this set Message-ID: <20140503102944.A456B1C0299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71227:12d9ba7402c8 Date: 2014-05-03 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/12d9ba7402c8/ Log: Bah, there is no point. 
All stm_ operations are automatically put in this set diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -17,13 +17,9 @@ 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', 'gc_adr_of_root_stack_top', 'gc_add_memory_pressure', 'weakref_create', 'weakref_deref', - 'stm_threadlocalref_get', 'stm_threadlocalref_set', - 'stm_threadlocalref_count', 'stm_threadlocalref_addr', 'jit_assembler_call', 'gc_writebarrier', 'shrink_array', 'jit_stm_transaction_break_point', 'jit_stm_should_break_transaction', - 'stm_longest_marker_state', 'stm_longest_marker_time', - 'stm_longest_marker_self', 'stm_longest_marker_other', ]) ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops()) From noreply at buildbot.pypy.org Sat May 3 13:23:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 13:23:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Precision Message-ID: <20140503112314.810341C01CB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5225:90efb16ed5e7 Date: 2014-05-03 13:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/90efb16ed5e7/ Log: Precision diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -224,8 +224,10 @@ to avoid this issue. In the end, fine-grained locking can transparently replace the GIL -and therefore parallelise existing applications without any -changes. It does however not provide a better synchronisation +and therefore parallelise existing applications, generally without any +changes\footnote{There are rare cases where not having atomic +bytecodes actually changes the semantics.} +It does however not provide a better synchronisation mechanism to the application like e.g. atomic blocks. 
%% - support of atomic blocks?\\ From noreply at buildbot.pypy.org Sat May 3 16:47:50 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 May 2014 16:47:50 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: revert the blocking of these tests Message-ID: <20140503144750.883231C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71228:dc53babe2e05 Date: 2014-05-03 07:45 -0700 http://bitbucket.org/pypy/pypy/changeset/dc53babe2e05/ Log: revert the blocking of these tests diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -211,8 +211,6 @@ def test01_string_argument_passing(self): """Test mapping of python strings and std::string""" - return - import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class @@ -244,8 +242,6 @@ def test02_string_data_access(self): """Test access to std::string object data members""" - return - import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class @@ -265,14 +261,10 @@ def test03_string_with_null_character(self): """Test that strings with NULL do not get truncated""" - return - import cppyy std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class - return - t0 = "aap\0noot" self.assertEqual(t0, "aap\0noot") From noreply at buildbot.pypy.org Sat May 3 16:47:52 2014 From: noreply at buildbot.pypy.org (wlav) Date: Sat, 3 May 2014 16:47:52 +0200 (CEST) Subject: [pypy-commit] pypy default: merge reflex-support into branch: 32b tests now succeed and are re-opened Message-ID: <20140503144752.01C551C0A66@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: Changeset: r71229:28680cf47437 Date: 2014-05-03 07:47 -0700 http://bitbucket.org/pypy/pypy/changeset/28680cf47437/ Log: merge reflex-support into branch: 32b tests now succeed and are re- opened diff --git a/pypy/module/cppyy/capi/capi_types.py 
b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, "GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -21,10 +21,11 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, l = 0, s = '', vp = 
rffi.cast(rffi.VOIDP, 0) ): - self._long = l + def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + self._handle = h + self._long = l self._string = s - self._voidp = vp + self._voidp = vp # For the loadable CAPI, the calls start and end in RPython. Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -57,7 +58,7 @@ if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): - misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._long), argtype.size) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) elif obj._voidp != rffi.cast(rffi.VOIDP, 0): data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp @@ -91,7 +92,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -116,6 +117,8 @@ c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + self.capi_call_ifaces = { # name to opaque C++ scope representation 'num_scopes' : ([c_scope], c_int), @@ -152,7 +155,7 @@ 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify # handling of function argument buffer - 'allocate_function_args' : ([c_size_t], c_voidp), + 'allocate_function_args' : ([c_int], c_voidp), 'deallocate_function_args' : ([c_voidp], c_void), 'function_arg_sizeof' : ([], c_size_t), 'function_arg_typeoffset' : ([], c_size_t), @@ -169,7 +172,7 @@ 'base_name' : ([c_type, c_int], c_ccharp), 'is_subtype' : ([c_type, c_type], c_int), - 'base_offset' : ([c_type, c_type, c_object, 
c_int], c_long), + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), # method/function reflection information 'num_methods' : ([c_scope], c_int), @@ -199,7 +202,7 @@ 'num_datamembers' : ([c_scope], c_int), 'datamember_name' : ([c_scope, c_int], c_ccharp), 'datamember_type' : ([c_scope, c_int], c_ccharp), - 'datamember_offset' : ([c_scope, c_int], c_size_t), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), 'datamember_index' : ([c_scope, c_ccharp], c_int), @@ -259,10 +262,13 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + +def _cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -273,74 +279,74 @@ # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(l=cppscope.handle), _Arg(l=iscope)] + args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', 
[_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) + args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(l=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), 
_Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), 
_Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return call_capi(space, 'call_s', args) def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(l=cppclass.handle)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHPTRGETTER_PTR, _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) @@ -358,47 +364,47 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(l=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) def c_is_enum(space, name): return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) # type/class reflection information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) def c_scoped_final_name(space, 
cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(l=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(l=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] + args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] - return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) + args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: - return rffi.cast(rffi.SIZE_T, 0) + return rffi.cast(rffi.LONG, 0) return _c_base_offset(space, derived.handle, base.handle, address, direction) def c_base_offset1(space, derived_h, base, address, direction): return _c_base_offset(space, derived_h, base.handle, address, direction) # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(l=cppscope.handle)] + 
args = [_Arg(h=cppscope.handle)] return space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(l=cppscope.handle), _Arg(l=imethod)] + args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -414,36 +420,36 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 
'method_arg_default', args)) def c_method_signature(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(l=cppscope.handle) + arg1 = _Arg(h=cppscope.handle) arg2 = _Arg(l=index) args = [c_resolve_name(space, charp2str_free(space, call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) @@ -451,45 +457,45 @@ return args def c_get_method(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + args = [_Arg(h=cppscope.handle), _Arg(l=index)] + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] + args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def 
c_is_staticmethod(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] - return _cdata_to_size_t(space, call_capi(space, 'datamember_offset', args)) + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = 
[_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers --------------------------------------------------------------- @@ -509,7 +515,7 @@ def c_charp2stdstring(space, svalue): return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -102,7 +102,7 @@ _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] libffitype = jit_libffi.types.slong - c_type = rffi.LONG + c_type = rffi.LONG c_ptrtype = rffi.LONGP def _unwrap_object(self, space, w_obj): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; 
typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); @@ -48,7 +48,7 @@ cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, cppyy_index_t idx); /* handling of function argument buffer ----------------------------------- */ - void* cppyy_allocate_function_args(size_t nargs); + void* cppyy_allocate_function_args(int nargs); void cppyy_deallocate_function_args(void* args); size_t cppyy_function_arg_sizeof(); size_t cppyy_function_arg_typeoffset(); @@ -66,7 +66,7 @@ int cppyy_is_subtype(cppyy_type_t derived, cppyy_type_t base); /* calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 */ - size_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); + ptrdiff_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t scope); @@ -97,7 +97,7 @@ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); - size_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); + ptrdiff_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); int cppyy_datamember_index(cppyy_scope_t scope, const char* name); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: 
cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -520,12 +520,12 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { assert(sizeof(CPPYY_G__value) == sizeof(G__value)); G__param* libp = (G__param*)malloc( offsetof(G__param, para) + nargs*sizeof(CPPYY_G__value)); libp->paran = (int)nargs; - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) libp->para[i].type = 'l'; return (void*)libp->para; } @@ -613,7 +613,7 @@ return derived_type->GetBaseClass(base_type) != 0; } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { R__LOCKGUARD2(gCINTMutex); @@ -642,7 +642,7 @@ } } - return (size_t) offset; // may be negative (will roll over) + return (ptrdiff_t) offset; // may be negative (will roll over) } @@ -941,16 +941,16 @@ return cppstring_to_cstring(gbl.GetFullTypeName()); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); - return (size_t)m->GetOffsetCint(); + return (ptrdiff_t)m->GetOffsetCint(); } assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; - return (size_t)gbl.GetAddress(); + return (ptrdiff_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx 
--- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -50,12 +50,12 @@ struct Cppyy_PseudoDatambrInfo { Cppyy_PseudoDatambrInfo(const std::string& name, const std::string& type, - size_t offset, bool isstatic) : + ptrdiff_t offset, bool isstatic) : m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} std::string m_name; std::string m_type; - size_t m_offset; + ptrdiff_t m_offset; bool m_isstatic; }; @@ -120,7 +120,7 @@ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) struct Cppyy_InitPseudoReflectionInfo { @@ -765,9 +765,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -900,7 +900,7 @@ return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { return s_scopes[handle].m_datambrs[idatambr].m_offset; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -212,9 +212,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; 
return (void*)args; } @@ -310,7 +310,7 @@ return (int)derived_type.HasBase(base_type); } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int direction) { Reflex::Type derived_type = type_from_handle(derived_handle); Reflex::Type base_type = type_from_handle(base_handle); @@ -336,8 +336,8 @@ if (ibase->first.ToType() == base_type) { long offset = (long)ibase->first.Offset((void*)address); if (direction < 0) - return (size_t) -offset; // note negative; rolls over - return (size_t)offset; + return (ptrdiff_t) -offset; // note negative; rolls over + return (ptrdiff_t)offset; } } @@ -561,12 +561,12 @@ return cppstring_to_cstring(name); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { Reflex::Scope s = scope_from_handle(handle); Reflex::Member m = s.DataMemberAt(datamember_index); if (m.IsArtificial() && m.TypeOf().IsEnum()) - return (size_t)&m.InterpreterOffset(); - return m.Offset(); + return (ptrdiff_t)&m.InterpreterOffset(); + return (ptrdiff_t)m.Offset(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,8 +7,6 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - if sys.maxsize < 2 ** 31: - py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -484,7 +482,7 @@ c = cppyy_test_data() assert c.get_valid_string('aap') == 'aap' - assert c.get_invalid_string() == '' + #assert c.get_invalid_string() == '' def test13_copy_contructor(self): """Test copy 
constructor""" diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -265,8 +265,6 @@ std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class - return - t0 = "aap\0noot" self.assertEqual(t0, "aap\0noot") From noreply at buildbot.pypy.org Sat May 3 17:05:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 17:05:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Tweaks: rstm.longest_abort_info() turns the transaction inevitable, so we need a different way to inspect it first Message-ID: <20140503150527.D56711C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71230:d6678ac9de67 Date: 2014-05-03 12:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d6678ac9de67/ Log: Tweaks: rstm.longest_abort_info() turns the transaction inevitable, so we need a different way to inspect it first diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -41,10 +41,11 @@ _fullfilenames = {} def print_abort_info(mintime=0.0): - a, b, c, d = _thread.longest_abort_info() - if b <= mintime: + info = _thread.longest_abort_info(mintime) + if info is None: return print >> sys.stderr, "Conflict", + a, b, c, d = info try: reason = _timing_reasons[a] except IndexError: diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec from pypy.module.thread.error import wrap_thread_error from rpython.rtyper.lltypesystem import rffi @@ -59,10 +60,13 @@ else: return space.wrap(1) -def longest_abort_info(space): + at unwrap_spec(mintime=float) +def longest_abort_info(space, mintime=0.0): if space.config.translation.stm: - from 
rpython.rlib.rstm import longest_abort_info - a, b, c, d = longest_abort_info() + from rpython.rlib import rstm + if rstm.longest_marker_time() <= mintime: + return space.w_None + a, b, c, d = rstm.longest_abort_info() return space.newtuple([space.wrap(a), space.wrap(b), space.wrap(c), space.wrap(d)]) else: diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -132,6 +132,10 @@ def pop_marker(): llop.stm_pop_marker(lltype.Void) + at dont_look_inside # XXX allow looking inside this function +def longest_marker_time(): + return llop.stm_longest_marker_time(lltype.Float) + @dont_look_inside def longest_abort_info(): state = llop.stm_longest_marker_state(lltype.Signed) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -268,6 +268,7 @@ setxy(globf, retry_counter) if retry_counter < 3: rstm.abort_and_retry() + print rstm.longest_marker_time() print rstm.longest_abort_info() rstm.reset_longest_abort_info() print rstm.longest_abort_info() @@ -286,7 +287,7 @@ data = cbuilder.cmdexec('a b') # # 6 == STM_TIME_RUN_ABORTED_OTHER - import re; r = re.compile(r'\(6, 0.00\d+, , \)\n\(0, 0.00+, , \)\n$') + import re; r = re.compile(r'0.00\d+\n\(6, 0.00\d+, , \)\n\(0, 0.00+, , \)\n$') assert r.match(data) def test_weakref(self): From noreply at buildbot.pypy.org Sat May 3 17:05:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 May 2014 17:05:29 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Updates Message-ID: <20140503150529.07D141C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71231:aa05911400e3 Date: 2014-05-03 13:55 +0200 http://bitbucket.org/pypy/pypy/changeset/aa05911400e3/ Log: Updates diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -5,7 
+5,7 @@ try: from __pypy__ import thread as _thread - from __pypy__.thread import atomic + from __pypy__.thread import atomic, getsegmentlimit except ImportError: # Not a STM-enabled PyPy. We can still provide a version of 'atomic' # that is good enough for our purposes. With this limited version, @@ -13,6 +13,9 @@ # thread Y is not within an atomic block at all. atomic = thread.allocate_lock() + def getsegmentlimit(): + return 1 + def print_abort_info(mintime=0.0): pass @@ -37,52 +40,53 @@ "'major gc'", "'sync pause'", ] - _r_line = re.compile(r'File "(.*?)", line (\d+), in ') + _r_line = re.compile(r'File "(.*?)[co]?", line (\d+), in ') _fullfilenames = {} def print_abort_info(mintime=0.0): info = _thread.longest_abort_info(mintime) if info is None: return - print >> sys.stderr, "Conflict", - a, b, c, d = info - try: - reason = _timing_reasons[a] - except IndexError: - reason = "'%s'" % (a,) - print >> sys.stderr, reason, - def show(line): - print >> sys.stderr, " ", line - match = _r_line.match(line) - if match: - filename = match.group(1) - lineno = int(match.group(2)) - if filename.startswith('...'): - if filename not in _fullfilenames: - partial = filename[3:] - found = set() - for module in sys.modules.values(): - try: - modfile = object.__getattribute__(module, '__file__') - except Exception: - modfile = None - if type(modfile) is str and modfile.endswith(partial): - found.add(modfile) - if len(found) == 1: - _fullfilenames[filename], = found - else: - _fullfilenames[filename] = None - filename = _fullfilenames[filename] - line = linecache.getline(filename, lineno) - if line: - print >> sys.stderr, " ", line.strip() - if d: - print >> sys.stderr, "between two threads:" - show(c) - show(d) - else: - print >> sys.stderr, "in this thread:" - show(c) - print >> sys.stderr, 'Lost %.6f seconds.' 
% (b,) - print >> sys.stderr - _thread.reset_longest_abort_info() + with atomic: + print >> sys.stderr, "Conflict", + a, b, c, d = info + try: + reason = _timing_reasons[a] + except IndexError: + reason = "'%s'" % (a,) + print >> sys.stderr, reason, + def show(line): + print >> sys.stderr, " ", line + match = _r_line.match(line) + if match and match.group(1) != '?': + filename = match.group(1) + lineno = int(match.group(2)) + if filename.startswith('...'): + if filename not in _fullfilenames: + partial = filename[3:] + found = set() + for module in sys.modules.values(): + try: + modfile = object.__getattribute__(module, '__file__') + except Exception: + modfile = None + if type(modfile) is str and modfile.endswith(partial): + found.add(modfile) + if len(found) == 1: + _fullfilenames[filename], = found + else: + _fullfilenames[filename] = None + filename = _fullfilenames[filename] + line = linecache.getline(filename, lineno) + if line: + print >> sys.stderr, " ", line.strip() + if d: + print >> sys.stderr, "between two threads:" + show(c) + show(d) + else: + print >> sys.stderr, "in this thread:" + show(c) + print >> sys.stderr, 'Lost %.6f seconds.' 
% (b,) + print >> sys.stderr + _thread.reset_longest_abort_info() From noreply at buildbot.pypy.org Sat May 3 21:12:29 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 May 2014 21:12:29 +0200 (CEST) Subject: [pypy-commit] pypy default: document branches for release branch Message-ID: <20140503191229.BB19B1C01CB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71232:ec864bd08d50 Date: 2014-05-03 21:42 +0300 http://bitbucket.org/pypy/pypy/changeset/ec864bd08d50/ Log: document branches for release branch diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -87,6 +87,10 @@ * Support for corner cases on objects with __int__ and __float__ methods +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -164,3 +164,6 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr + +.. branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name From noreply at buildbot.pypy.org Sat May 3 21:12:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 May 2014 21:12:30 +0200 (CEST) Subject: [pypy-commit] pypy default: restart whatsnew-head Message-ID: <20140503191230.DFF291C01CB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71233:73c63eb1efb0 Date: 2014-05-03 21:43 +0300 http://bitbucket.org/pypy/pypy/changeset/73c63eb1efb0/ Log: restart whatsnew-head diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,7 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 773fc6275c69 - -.. 
branch: fix-tpname -Changes hacks surrounding W_TypeObject.name to match CPython's tp_name +.. startrev: ec864bd08d50 From noreply at buildbot.pypy.org Sat May 3 21:12:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 May 2014 21:12:33 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge branch into default Message-ID: <20140503191233.8E29E1C01CB@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71234:0f4cd1fb6314 Date: 2014-05-03 22:11 +0300 http://bitbucket.org/pypy/pypy/changeset/0f4cd1fb6314/ Log: merge branch into default diff too long, truncating to 2000 out of 2974 lines diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -87,6 +87,10 @@ * Support for corner cases on objects with __int__ and __float__ methods +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -164,3 +164,6 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr + +.. branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 773fc6275c69 - +.. 
startrev: ec864bd08d50 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, @@ -2804,6 +2803,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2851,6 +2851,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2898,6 +2899,7 @@ __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -2951,6 +2953,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2998,6 +3001,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -3063,6 +3067,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3191,6 +3196,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), 
__init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3315,6 +3321,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3368,6 +3375,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3415,6 +3423,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3492,6 +3501,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3605,6 +3615,7 @@ __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) +AugAssign.typedef.heaptype = True def Print_get_dest(space, w_self): if w_self.w_dict is not None: @@ -3711,6 +3722,7 @@ __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) +Print.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3842,6 +3854,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -3943,6 +3956,7 @@ __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4044,6 +4058,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ -4151,6 +4166,7 @@ __new__=interp2app(get_AST_new(With)), 
__init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_type(space, w_self): if w_self.w_dict is not None: @@ -4264,6 +4280,7 @@ __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4359,6 +4376,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4430,6 +4448,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4513,6 +4532,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4560,6 +4580,7 @@ __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) +Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4668,6 +4689,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Exec_get_body(space, w_self): if w_self.w_dict is not None: @@ -4781,6 +4803,7 @@ __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) +Exec.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4828,6 +4851,7 @@ __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4881,6 +4905,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ 
-4898,6 +4923,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Break, w_self) @@ -4915,6 +4941,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4932,6 +4959,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4997,6 +5025,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5074,6 +5103,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True def BinOp_get_left(space, w_self): if w_self.w_dict is not None: @@ -5187,6 +5217,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5270,6 +5301,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5351,6 +5383,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) +Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5464,6 +5497,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5535,6 +5569,7 @@ __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) 
+Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5582,6 +5617,7 @@ __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5659,6 +5695,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5736,6 +5773,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5843,6 +5881,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5920,6 +5959,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = True def Yield_get_value(space, w_self): if w_self.w_dict is not None: @@ -5973,6 +6013,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6074,6 +6115,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6235,6 +6277,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) +Call.typedef.heaptype = True def Repr_get_value(space, w_self): if w_self.w_dict is not None: @@ -6288,6 +6331,7 @@ __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) +Repr.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6340,6 +6384,7 @@ __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) 
+Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6392,6 +6437,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6504,6 +6550,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6617,6 +6664,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6699,6 +6747,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6776,6 +6825,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6853,6 +6903,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -6905,6 +6956,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -6912,6 +6964,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True _Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -6919,6 +6972,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -6926,6 +6980,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True 
_Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -6933,6 +6988,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -6940,6 +6996,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -6947,6 +7004,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -6954,6 +7012,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -6961,6 +7020,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6978,6 +7038,7 @@ __new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7091,6 +7152,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7138,6 +7200,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, w_self): if w_self.w_dict is not None: @@ -7191,6 +7254,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7198,6 +7262,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True _And.typedef = 
typedef.TypeDef("And", boolop.typedef, @@ -7205,6 +7270,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7212,6 +7278,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7219,6 +7286,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7226,6 +7294,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7233,6 +7302,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7240,6 +7310,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True _Div.typedef = typedef.TypeDef("Div", operator.typedef, @@ -7247,6 +7318,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7254,6 +7326,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7261,6 +7334,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = typedef.TypeDef("LShift", operator.typedef, @@ -7268,6 +7342,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7275,6 +7350,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = 
typedef.TypeDef("BitOr", operator.typedef, @@ -7282,6 +7358,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7289,6 +7366,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7296,6 +7374,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7303,6 +7382,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7310,6 +7390,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7317,6 +7398,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Invert)), ) +_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7324,6 +7406,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7331,6 +7414,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7338,6 +7422,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), ) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7345,6 +7430,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7352,6 +7438,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype 
= True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7359,6 +7446,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7366,6 +7454,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7373,6 +7462,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7380,6 +7470,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7387,6 +7478,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7394,6 +7486,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = True _IsNot.typedef = typedef.TypeDef("IsNot", cmpop.typedef, @@ -7401,6 +7494,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7408,6 +7502,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7415,6 +7510,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, w_self): if w_self.w_dict is not None: @@ -7522,6 +7618,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7587,6 +7684,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, 
excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7694,6 +7792,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -7829,6 +7928,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -7911,6 +8011,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -7995,4 +8096,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -352,6 +352,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): @@ -669,10 +670,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -363,7 +363,7 @@ if fmt == 'R': result = 
space.str_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name elif fmt == 'N': result = value.getname(space) else: @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,9 +20,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import _HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast @@ -223,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ 
-407,7 +407,7 @@ def test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -56,8 +56,7 @@ raise MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), next = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- 
a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(xrange(-5125, -5000)) d.__init__(xrange(200)) for i in xrange(200, 400): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- 
a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -245,8 +245,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -211,17 +211,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - module = space.str_w(space.type(self).get_module()) + 
typename = space.type(self).name try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap("<%s.%s>" % (module, typename,)) + return space.wrap("<%s>" % (typename,)) else: name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ @@ -844,10 +843,9 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -892,10 +890,9 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = interp2app(W_BufferedWriter.descr_init), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1015,10 +1012,9 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', W_TextIOBase.typedef, __new__ = 
generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1015,11 +1015,10 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), - __module__ = "_io", next = interp2app(W_TextIOWrapper.next_w), read = interp2app(W_TextIOWrapper.read_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' 
if class_name is None: @@ -440,8 +440,7 @@ return space.wrap(p) W_Profiler.typedef = TypeDef( - 'Profiler', - __module__ = '_lsprof', + '_lsprof.Profiler', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), disable = interp2app(W_Profiler.disable), diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -353,9 +353,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = 
'_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -534,8 +533,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,8 +600,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -488,9 +488,8 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ 
-249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, "GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -21,10 +21,11 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, l = 0, s = '', vp = rffi.cast(rffi.VOIDP, 0) ): - self._long = l + def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + self._handle = h + self._long = l self._string = s - self._voidp = vp + self._voidp = vp # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -57,7 +58,7 @@ if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): - misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._long), argtype.size) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) elif obj._voidp != rffi.cast(rffi.VOIDP, 0): data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp @@ -91,7 +92,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -116,6 +117,8 @@ c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + self.capi_call_ifaces = { # name to opaque C++ scope representation 'num_scopes' : ([c_scope], c_int), @@ -152,7 +155,7 @@ 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify # handling of function argument buffer - 'allocate_function_args' : ([c_size_t], c_voidp), + 'allocate_function_args' : ([c_int], c_voidp), 'deallocate_function_args' : ([c_voidp], c_void), 'function_arg_sizeof' : ([], c_size_t), 'function_arg_typeoffset' : ([], c_size_t), @@ -169,7 +172,7 @@ 'base_name' : ([c_type, c_int], c_ccharp), 'is_subtype' : ([c_type, c_type], c_int), - 'base_offset' : ([c_type, c_type, c_object, c_int], c_long), + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), # method/function reflection information 'num_methods' : ([c_scope], c_int), @@ -199,7 +202,7 @@ 'num_datamembers' : ([c_scope], c_int), 'datamember_name' : ([c_scope, c_int], c_ccharp), 
'datamember_type' : ([c_scope, c_int], c_ccharp), - 'datamember_offset' : ([c_scope, c_int], c_size_t), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), 'datamember_index' : ([c_scope, c_ccharp], c_int), @@ -259,10 +262,13 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + +def _cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -273,74 +279,74 @@ # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(l=cppscope.handle), _Arg(l=iscope)] + args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', 
args))) + args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(l=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), 
_Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), 
_Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return call_capi(space, 'call_s', args) def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(l=cppclass.handle)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHPTRGETTER_PTR, _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) @@ -358,47 +364,47 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(l=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) def c_is_enum(space, name): return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) # type/class reflection information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 
'has_complex_hierarchy', [_Arg(l=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(l=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] + args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] - return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) + args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: - return rffi.cast(rffi.SIZE_T, 0) + return rffi.cast(rffi.LONG, 0) return _c_base_offset(space, derived.handle, base.handle, address, direction) def c_base_offset1(space, derived_h, base, address, direction): return _c_base_offset(space, derived_h, base.handle, address, direction) # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(l=cppscope.handle)] + args = [_Arg(h=cppscope.handle)] return space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(l=cppscope.handle), _Arg(l=imethod)] + args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] return 
space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -414,36 +420,36 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def 
c_method_is_template(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(l=cppscope.handle) + arg1 = _Arg(h=cppscope.handle) arg2 = _Arg(l=index) args = [c_resolve_name(space, charp2str_free(space, call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) @@ -451,45 +457,45 @@ return args def c_get_method(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + args = [_Arg(h=cppscope.handle), _Arg(l=index)] + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] + args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information 
----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] - return _cdata_to_size_t(space, call_capi(space, 'datamember_offset', args)) + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers --------------------------------------------------------------- @@ -509,7 +515,7 @@ def c_charp2stdstring(space, svalue): 
return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -102,7 +102,7 @@ _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] libffitype = jit_libffi.types.slong - c_type = rffi.LONG + c_type = rffi.LONG c_ptrtype = rffi.LONGP def _unwrap_object(self, space, w_obj): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); @@ -48,7 +48,7 @@ cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, cppyy_index_t idx); /* handling of function argument buffer ----------------------------------- */ - void* 
cppyy_allocate_function_args(size_t nargs); + void* cppyy_allocate_function_args(int nargs); void cppyy_deallocate_function_args(void* args); size_t cppyy_function_arg_sizeof(); size_t cppyy_function_arg_typeoffset(); @@ -66,7 +66,7 @@ int cppyy_is_subtype(cppyy_type_t derived, cppyy_type_t base); /* calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 */ - size_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); + ptrdiff_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t scope); @@ -97,7 +97,7 @@ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); - size_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); + ptrdiff_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); int cppyy_datamember_index(cppyy_scope_t scope, const char* name); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -520,12 +520,12 @@ /* handling of function 
argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { assert(sizeof(CPPYY_G__value) == sizeof(G__value)); G__param* libp = (G__param*)malloc( offsetof(G__param, para) + nargs*sizeof(CPPYY_G__value)); libp->paran = (int)nargs; - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) libp->para[i].type = 'l'; return (void*)libp->para; } @@ -613,7 +613,7 @@ return derived_type->GetBaseClass(base_type) != 0; } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { R__LOCKGUARD2(gCINTMutex); @@ -642,7 +642,7 @@ } } - return (size_t) offset; // may be negative (will roll over) + return (ptrdiff_t) offset; // may be negative (will roll over) } @@ -941,16 +941,16 @@ return cppstring_to_cstring(gbl.GetFullTypeName()); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); - return (size_t)m->GetOffsetCint(); + return (ptrdiff_t)m->GetOffsetCint(); } assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; - return (size_t)gbl.GetAddress(); + return (ptrdiff_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -50,12 +50,12 @@ struct Cppyy_PseudoDatambrInfo { Cppyy_PseudoDatambrInfo(const std::string& name, const std::string& type, - size_t offset, bool isstatic) : + 
ptrdiff_t offset, bool isstatic) : m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} std::string m_name; std::string m_type; - size_t m_offset; + ptrdiff_t m_offset; bool m_isstatic; }; @@ -120,7 +120,7 @@ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) struct Cppyy_InitPseudoReflectionInfo { @@ -765,9 +765,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -900,7 +900,7 @@ return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { return s_scopes[handle].m_datambrs[idatambr].m_offset; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -212,9 +212,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -310,7 +310,7 @@ return (int)derived_type.HasBase(base_type); } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, 
cppyy_object_t address, int direction) { Reflex::Type derived_type = type_from_handle(derived_handle); Reflex::Type base_type = type_from_handle(base_handle); @@ -336,8 +336,8 @@ if (ibase->first.ToType() == base_type) { long offset = (long)ibase->first.Offset((void*)address); if (direction < 0) - return (size_t) -offset; // note negative; rolls over - return (size_t)offset; + return (ptrdiff_t) -offset; // note negative; rolls over + return (ptrdiff_t)offset; } } @@ -561,12 +561,12 @@ return cppstring_to_cstring(name); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { Reflex::Scope s = scope_from_handle(handle); Reflex::Member m = s.DataMemberAt(datamember_index); if (m.IsArtificial() && m.TypeOf().IsEnum()) - return (size_t)&m.InterpreterOffset(); - return m.Offset(); + return (ptrdiff_t)&m.InterpreterOffset(); + return (ptrdiff_t)m.Offset(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,8 +7,6 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - if sys.maxsize < 2 ** 31: - py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -484,7 +482,7 @@ c = cppyy_test_data() assert c.get_valid_string('aap') == 'aap' - assert c.get_invalid_string() == '' + #assert c.get_invalid_string() == '' def test13_copy_contructor(self): """Test copy constructor""" diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -265,8 +265,6 @@ std = cppyy.gbl.std stringy_class = 
cppyy.gbl.stringy_class - return - t0 = "aap\0noot" self.assertEqual(t0, "aap\0noot") diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h From noreply at buildbot.pypy.org Sat May 3 21:52:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 21:52:51 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Backed out changeset 773bc2c8bfc5 Message-ID: <20140503195251.725771C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71235:c96f5bdea897 Date: 2014-05-03 15:50 -0400 http://bitbucket.org/pypy/pypy/changeset/c96f5bdea897/ Log: Backed out changeset 773bc2c8bfc5 diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, 
"GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -91,7 +91,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -259,10 +259,10 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -281,12 +281,12 @@ def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 
'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', args))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): @@ -302,7 +302,7 @@ call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) @@ -452,7 +452,7 @@ def c_get_method(space, cppscope, index): args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ 
extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,8 +7,6 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") - if sys.maxsize < 2 ** 31: - py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") From noreply at buildbot.pypy.org Sat May 3 21:52:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 21:52:53 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default Message-ID: <20140503195253.051971C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71236:1644666ea5d3 Date: 2014-05-03 15:51 -0400 http://bitbucket.org/pypy/pypy/changeset/1644666ea5d3/ Log: merge default diff too long, truncating to 2000 out of 2849 lines diff --git a/pypy/doc/release-2.3.0.rst 
b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -87,6 +87,10 @@ * Support for corner cases on objects with __int__ and __float__ methods +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -164,3 +164,6 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr + +.. branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 773fc6275c69 - +.. startrev: ec864bd08d50 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, @@ -2804,6 +2803,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2851,6 +2851,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2898,6 
+2899,7 @@ __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -2951,6 +2953,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2998,6 +3001,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -3063,6 +3067,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3191,6 +3196,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), __init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3315,6 +3321,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3368,6 +3375,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3415,6 +3423,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3492,6 +3501,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3605,6 +3615,7 @@ __new__=interp2app(get_AST_new(AugAssign)), 
__init__=interp2app(AugAssign_init), ) +AugAssign.typedef.heaptype = True def Print_get_dest(space, w_self): if w_self.w_dict is not None: @@ -3711,6 +3722,7 @@ __new__=interp2app(get_AST_new(Print)), __init__=interp2app(Print_init), ) +Print.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3842,6 +3854,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -3943,6 +3956,7 @@ __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4044,6 +4058,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ -4151,6 +4166,7 @@ __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_type(space, w_self): if w_self.w_dict is not None: @@ -4264,6 +4280,7 @@ __new__=interp2app(get_AST_new(Raise)), __init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4359,6 +4376,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4430,6 +4448,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4513,6 +4532,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4560,6 +4580,7 @@ __new__=interp2app(get_AST_new(Import)), 
__init__=interp2app(Import_init), ) +Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4668,6 +4689,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Exec_get_body(space, w_self): if w_self.w_dict is not None: @@ -4781,6 +4803,7 @@ __new__=interp2app(get_AST_new(Exec)), __init__=interp2app(Exec_init), ) +Exec.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4828,6 +4851,7 @@ __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4881,6 +4905,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ -4898,6 +4923,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Break, w_self) @@ -4915,6 +4941,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4932,6 +4959,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4997,6 +5025,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5074,6 +5103,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True 
def BinOp_get_left(space, w_self): if w_self.w_dict is not None: @@ -5187,6 +5217,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5270,6 +5301,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5351,6 +5383,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) +Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5464,6 +5497,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5535,6 +5569,7 @@ __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) +Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5582,6 +5617,7 @@ __new__=interp2app(get_AST_new(Set)), __init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5659,6 +5695,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5736,6 +5773,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5843,6 +5881,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5920,6 +5959,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = 
True def Yield_get_value(space, w_self): if w_self.w_dict is not None: @@ -5973,6 +6013,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6074,6 +6115,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6235,6 +6277,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) +Call.typedef.heaptype = True def Repr_get_value(space, w_self): if w_self.w_dict is not None: @@ -6288,6 +6331,7 @@ __new__=interp2app(get_AST_new(Repr)), __init__=interp2app(Repr_init), ) +Repr.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6340,6 +6384,7 @@ __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) +Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6392,6 +6437,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6504,6 +6550,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6617,6 +6664,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6699,6 +6747,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6776,6 +6825,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not 
w_self.initialization_state & 4: @@ -6853,6 +6903,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -6905,6 +6956,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -6912,6 +6964,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True _Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -6919,6 +6972,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -6926,6 +6980,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True _Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -6933,6 +6988,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -6940,6 +6996,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -6947,6 +7004,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -6954,6 +7012,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -6961,6 +7020,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6978,6 
+7038,7 @@ __new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7091,6 +7152,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7138,6 +7200,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, w_self): if w_self.w_dict is not None: @@ -7191,6 +7254,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7198,6 +7262,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True _And.typedef = typedef.TypeDef("And", boolop.typedef, @@ -7205,6 +7270,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7212,6 +7278,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7219,6 +7286,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7226,6 +7294,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7233,6 +7302,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7240,6 +7310,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True 
_Div.typedef = typedef.TypeDef("Div", operator.typedef, @@ -7247,6 +7318,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7254,6 +7326,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7261,6 +7334,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = typedef.TypeDef("LShift", operator.typedef, @@ -7268,6 +7342,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7275,6 +7350,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = typedef.TypeDef("BitOr", operator.typedef, @@ -7282,6 +7358,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7289,6 +7366,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7296,6 +7374,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7303,6 +7382,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7310,6 +7390,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7317,6 +7398,7 @@ _fields=_FieldsWrapper([]), 
__new__=interp2app(get_AST_new(_Invert)), ) +_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7324,6 +7406,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7331,6 +7414,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7338,6 +7422,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), ) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7345,6 +7430,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7352,6 +7438,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype = True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7359,6 +7446,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7366,6 +7454,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7373,6 +7462,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7380,6 +7470,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7387,6 +7478,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7394,6 +7486,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = 
True _IsNot.typedef = typedef.TypeDef("IsNot", cmpop.typedef, @@ -7401,6 +7494,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7408,6 +7502,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7415,6 +7510,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, w_self): if w_self.w_dict is not None: @@ -7522,6 +7618,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7587,6 +7684,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7694,6 +7792,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -7829,6 +7928,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -7911,6 +8011,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -7995,4 +8096,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py 
b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -352,6 +352,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): @@ -669,10 +670,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -363,7 +363,7 @@ if fmt == 'R': result = space.str_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name elif fmt == 'N': result = value.getname(space) else: @@ -404,7 +404,7 @@ %N - The result of w_arg.getname(space) %R - The result of space.str_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,9 +20,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import 
_HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast @@ -223,19 +223,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ -407,7 +407,7 @@ def test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -56,8 +56,7 @@ raise MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git 
a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), next = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ 
W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(xrange(-5125, -5000)) d.__init__(xrange(200)) for i in xrange(200, 400): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -245,8 +245,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = 
interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -211,17 +211,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - module = space.str_w(space.type(self).get_module()) + typename = space.type(self).name try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap("<%s.%s>" % (module, typename,)) + return space.wrap("<%s>" % (typename,)) else: name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ @@ -844,10 +843,9 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -892,10 +890,9 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', 
W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = interp2app(W_BufferedWriter.descr_init), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1015,10 +1012,9 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', W_TextIOBase.typedef, __new__ = generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1015,11 +1015,10 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), - __module__ = "_io", next = interp2app(W_TextIOWrapper.next_w), read = interp2app(W_TextIOWrapper.read_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - 
class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' if class_name is None: @@ -440,8 +440,7 @@ return space.wrap(p) W_Profiler.typedef = TypeDef( - 'Profiler', - __module__ = '_lsprof', + '_lsprof.Profiler', __new__ = interp2app(descr_new_profile), enable = interp2app(W_Profiler.enable), disable = interp2app(W_Profiler.disable), diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -353,9 +353,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', 
W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -534,8 +533,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ = interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -600,8 +600,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -488,9 +488,8 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -21,10 +21,11 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, l = 0, s = '', vp = rffi.cast(rffi.VOIDP, 0) ): - self._long = l + def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + self._handle = h + self._long = l self._string = s - self._voidp = vp + self._voidp = vp # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -57,7 +58,7 @@ if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): - misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._long), argtype.size) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) elif obj._voidp != rffi.cast(rffi.VOIDP, 0): data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp @@ -116,6 +117,8 @@ c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + self.capi_call_ifaces = { # name to opaque C++ scope representation 'num_scopes' : ([c_scope], c_int), @@ -152,7 +155,7 @@ 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify # handling of function argument buffer - 'allocate_function_args' : ([c_size_t], c_voidp), + 'allocate_function_args' : ([c_int], c_voidp), 'deallocate_function_args' : ([c_voidp], c_void), 'function_arg_sizeof' : ([], c_size_t), 'function_arg_typeoffset' : ([], c_size_t), @@ -169,7 +172,7 @@ 'base_name' : ([c_type, c_int], c_ccharp), 'is_subtype' : ([c_type, c_type], c_int), - 'base_offset' : ([c_type, c_type, c_object, c_int], c_long), + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), # method/function reflection information 'num_methods' : ([c_scope], c_int), @@ -199,7 +202,7 @@ 'num_datamembers' : ([c_scope], c_int), 'datamember_name' : ([c_scope, c_int], c_ccharp), 'datamember_type' : ([c_scope, c_int], c_ccharp), - 'datamember_offset' : ([c_scope, c_int], c_size_t), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), 'datamember_index' : ([c_scope, c_ccharp], c_int), @@ -264,6 +267,9 @@ def _cdata_to_size_t(space, w_cdata): return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) +def 
_cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) + def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False)._cdata) @@ -273,9 +279,9 @@ # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(l=cppscope.handle), _Arg(l=iscope)] + args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): @@ -285,62 +291,62 @@ def c_get_template(space, name): return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] + args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(l=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, 
nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, 
cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return call_capi(space, 'call_s', args) def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(l=cppclass.handle)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHPTRGETTER_PTR, _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) @@ 
-358,47 +364,47 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(l=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) def c_is_enum(space, name): return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) # type/class reflection information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(l=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(l=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] + args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] - return _cdata_to_size_t(space, call_capi(space, 'base_offset', 
args)) + args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: - return rffi.cast(rffi.SIZE_T, 0) + return rffi.cast(rffi.LONG, 0) return _c_base_offset(space, derived.handle, base.handle, address, direction) def c_base_offset1(space, derived_h, base, address, direction): return _c_base_offset(space, derived_h, base.handle, address, direction) # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(l=cppscope.handle)] + args = [_Arg(h=cppscope.handle)] return space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(l=cppscope.handle), _Arg(l=imethod)] + args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -414,36 +420,36 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - 
args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(l=cppscope.handle) + arg1 = _Arg(h=cppscope.handle) arg2 = _Arg(l=index) args = [c_resolve_name(space, charp2str_free(space, call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) @@ -451,45 +457,45 @@ return args def c_get_method(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss 
is not None: - args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] + args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] - return _cdata_to_size_t(space, call_capi(space, 'datamember_offset', args)) + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = 
[_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers --------------------------------------------------------------- @@ -509,7 +515,7 @@ def c_charp2stdstring(space, svalue): return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -102,7 +102,7 @@ _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] libffitype = jit_libffi.types.slong - c_type = rffi.LONG + c_type = rffi.LONG c_ptrtype = rffi.LONGP def _unwrap_object(self, space, w_obj): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -48,7 +48,7 @@ cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, cppyy_index_t idx); /* handling of function argument buffer ----------------------------------- 
*/ - void* cppyy_allocate_function_args(size_t nargs); + void* cppyy_allocate_function_args(int nargs); void cppyy_deallocate_function_args(void* args); size_t cppyy_function_arg_sizeof(); size_t cppyy_function_arg_typeoffset(); @@ -66,7 +66,7 @@ int cppyy_is_subtype(cppyy_type_t derived, cppyy_type_t base); /* calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 */ - size_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); + ptrdiff_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t scope); @@ -97,7 +97,7 @@ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); - size_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); + ptrdiff_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); int cppyy_datamember_index(cppyy_scope_t scope, const char* name); diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -520,12 +520,12 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { assert(sizeof(CPPYY_G__value) == sizeof(G__value)); G__param* libp = (G__param*)malloc( offsetof(G__param, para) + nargs*sizeof(CPPYY_G__value)); libp->paran = (int)nargs; - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) libp->para[i].type = 'l'; return (void*)libp->para; } @@ -613,7 +613,7 @@ return derived_type->GetBaseClass(base_type) != 0; } -size_t cppyy_base_offset(cppyy_type_t 
derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { R__LOCKGUARD2(gCINTMutex); @@ -642,7 +642,7 @@ } } - return (size_t) offset; // may be negative (will roll over) + return (ptrdiff_t) offset; // may be negative (will roll over) } @@ -941,16 +941,16 @@ return cppstring_to_cstring(gbl.GetFullTypeName()); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); - return (size_t)m->GetOffsetCint(); + return (ptrdiff_t)m->GetOffsetCint(); } assert(handle == (cppyy_type_t)GLOBAL_HANDLE); TGlobal& gbl = g_globalvars[datamember_index]; - return (size_t)gbl.GetAddress(); + return (ptrdiff_t)gbl.GetAddress(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -50,12 +50,12 @@ struct Cppyy_PseudoDatambrInfo { Cppyy_PseudoDatambrInfo(const std::string& name, const std::string& type, - size_t offset, bool isstatic) : + ptrdiff_t offset, bool isstatic) : m_name(name), m_type(type), m_offset(offset), m_isstatic(isstatic) {} std::string m_name; std::string m_type; - size_t m_offset; + ptrdiff_t m_offset; bool m_isstatic; }; @@ -120,7 +120,7 @@ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (size_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) struct Cppyy_InitPseudoReflectionInfo { @@ -765,9 +765,9 @@ /* handling of function argument buffer 
----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -900,7 +900,7 @@ return cppstring_to_cstring(s_scopes[handle].m_datambrs[idatambr].m_type); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int idatambr) { return s_scopes[handle].m_datambrs[idatambr].m_offset; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -212,9 +212,9 @@ /* handling of function argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) args[i].type = 'l'; return (void*)args; } @@ -310,7 +310,7 @@ return (int)derived_type.HasBase(base_type); } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int direction) { Reflex::Type derived_type = type_from_handle(derived_handle); Reflex::Type base_type = type_from_handle(base_handle); @@ -336,8 +336,8 @@ if (ibase->first.ToType() == base_type) { long offset = (long)ibase->first.Offset((void*)address); if (direction < 0) - return (size_t) -offset; // note negative; rolls over - return (size_t)offset; + return (ptrdiff_t) -offset; // note negative; rolls over + return (ptrdiff_t)offset; } } @@ -561,12 +561,12 @@ return cppstring_to_cstring(name); } -size_t 
cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { Reflex::Scope s = scope_from_handle(handle); Reflex::Member m = s.DataMemberAt(datamember_index); if (m.IsArtificial() && m.TypeOf().IsEnum()) - return (size_t)&m.InterpreterOffset(); - return m.Offset(); + return (ptrdiff_t)&m.InterpreterOffset(); + return (ptrdiff_t)m.Offset(); } int cppyy_datamember_index(cppyy_scope_t handle, const char* name) { diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -482,7 +482,7 @@ c = cppyy_test_data() assert c.get_valid_string('aap') == 'aap' - assert c.get_invalid_string() == '' + #assert c.get_invalid_string() == '' def test13_copy_contructor(self): """Test copy constructor""" diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -265,8 +265,6 @@ std = cppyy.gbl.std stringy_class = cppyy.gbl.stringy_class - return - t0 = "aap\0noot" self.assertEqual(t0, "aap\0noot") diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -6,6 +6,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, interp_attrproperty, interp_attrproperty_w) +from pypy.objspace.std.typeobject import W_TypeObject from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, @@ -158,7 +159,9 @@ self.doc = doc self.func = func pyo = rffi.cast(PyObject, pto) - self.w_objclass = from_ref(space, pyo) + w_type = from_ref(space, pyo) + assert 
isinstance(w_type, W_TypeObject) + self.w_objclass = w_type def call(self, space, w_self, w_args, w_kw): if self.wrapper_func is None: @@ -174,7 +177,7 @@ def descr_method_repr(self): return self.space.wrap("" % (self.method_name, - self.w_objclass.getname(self.space))) + self.w_objclass.name)) def cwrapper_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCWrapperObject, w_self) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -33,7 +33,7 @@ assert "copy" in repr(module.fooType.copy) assert repr(module.fooType) == "" assert repr(obj2) == "" - assert repr(module.fooType.__call__) == "" + assert repr(module.fooType.__call__) == "" assert obj2(foo=1, bar=2) == dict(foo=1, bar=2) print(obj.foo) diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -9,11 +9,17 @@ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); PyModule_AddStringConstant(m, "py_version", PY_VERSION); + PyModule_AddIntConstant(m, "py_major_version", PY_MAJOR_VERSION); + PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION); + PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION); PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION); } """ module = self.import_module(name='foo', init=init) assert module.py_version == sys.version[:5] + assert module.py_major_version == sys.version_info.major + assert module.py_minor_version == sys.version_info.minor + assert module.py_micro_version == sys.version_info.micro v = sys.pypy_version_info s = '%d.%d.%d' % (v[0], v[1], v[2]) if v.releaselevel != 'final': diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ 
-291,14 +291,9 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, self) convert_member_defs(space, dict_w, pto.c_tp_members, self) - full_name = rffi.charp2str(pto.c_tp_name) - if '.' in full_name: - module_name, extension_name = rsplit(full_name, ".", 1) - dict_w["__module__"] = space.wrap(module_name) - else: - extension_name = full_name + name = rffi.charp2str(pto.c_tp_name) - W_TypeObject.__init__(self, space, extension_name, + W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w) if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True @@ -518,7 +513,7 @@ from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: - pto.c_tp_name = rffi.str2charp(w_type.getname(space)) + pto.c_tp_name = rffi.str2charp(w_type.name) pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -207,9 +207,8 @@ return interp2app(descr_new_base_exception) W_BaseException.typedef = TypeDef( - 'BaseException', + 'exceptions.BaseException', __doc__ = W_BaseException.__doc__, - __module__ = 'exceptions', __new__ = _new(W_BaseException), __init__ = interp2app(W_BaseException.descr_init), __str__ = interp2app(W_BaseException.descr_str), @@ -244,10 +243,9 @@ for k, v in kwargs.items(): kwargs[k] = interp2app(v.__get__(None, realbase)) W_Exc.typedef = TypeDef( - name, + 'exceptions.' 
+ name, base.typedef, __doc__ = W_Exc.__doc__, From noreply at buildbot.pypy.org Sat May 3 21:52:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 3 May 2014 21:52:54 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge heads Message-ID: <20140503195254.3876A1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71237:2db83dccc1e4 Date: 2014-05-03 15:52 -0400 http://bitbucket.org/pypy/pypy/changeset/2db83dccc1e4/ Log: merge heads From noreply at buildbot.pypy.org Sat May 3 21:58:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 May 2014 21:58:21 +0200 (CEST) Subject: [pypy-commit] pypy default: This isn't used anymore Message-ID: <20140503195821.54B0A1C01CB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71238:f5d582516157 Date: 2014-05-03 12:57 -0700 http://bitbucket.org/pypy/pypy/changeset/f5d582516157/ Log: This isn't used anymore diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -390,9 +390,8 @@ self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - from pypy.interpreter.pycode import cpython_magic, default_magic + from pypy.interpreter.pycode import default_magic self.our_magic = default_magic - self.host_magic = cpython_magic # can be overridden to a subclass self.initialize() From noreply at buildbot.pypy.org Sat May 3 21:58:22 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 May 2014 21:58:22 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140503195822.96D1E1C01CB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71239:f51149fdd0c2 Date: 2014-05-03 12:57 -0700 http://bitbucket.org/pypy/pypy/changeset/f51149fdd0c2/ Log: merged upstream diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ 
b/pypy/doc/release-2.3.0.rst @@ -87,6 +87,10 @@ * Support for corner cases on objects with __int__ and __float__ methods +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -164,3 +164,6 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr + +.. branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,7 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 773fc6275c69 - -.. branch: fix-tpname -Changes hacks surrounding W_TypeObject.name to match CPython's tp_name +.. startrev: ec864bd08d50 From noreply at buildbot.pypy.org Sat May 3 21:58:49 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 3 May 2014 21:58:49 +0200 (CEST) Subject: [pypy-commit] pypy default: This is also unused Message-ID: <20140503195849.1EEB41C01CB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71240:988f37762851 Date: 2014-05-03 12:58 -0700 http://bitbucket.org/pypy/pypy/changeset/988f37762851/ Log: This is also unused diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -390,10 +390,7 @@ self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - from pypy.interpreter.pycode import default_magic - self.our_magic = default_magic # can be overridden to a subclass - self.initialize() def startup(self): From noreply at buildbot.pypy.org Sun May 4 00:04:20 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 May 
2014 00:04:20 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140503220420.1F0C11C0299@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71241:2f47b24c4692 Date: 2014-05-02 11:37 -0700 http://bitbucket.org/pypy/pypy/changeset/2f47b24c4692/ Log: merge default diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -18,17 +18,18 @@ http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy -project. We showed quite a bit of progress on all three projects (see below) -and we're slowly running out of funds. -Please consider donating more so we can finish those projects! The three -projects are: +project, and for those who donate to our three sub-projects. +We showed quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: * `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. * `STM`_ (software transactional memory): a preview will be released very soon, once we fix a few bugs -* `NumPy`_ the work done is included in the PyPy 2.2 release. More details below. +* `NumPy`_ which is included in the PyPy 2.3 release. More details below. .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html @@ -44,8 +45,8 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison; -note that the latest cpython is not faster than cpython 2.7.2) +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) due to its integrated tracing JIT compiler. 
This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, @@ -56,13 +57,13 @@ bit python is still stalling, we would welcome a volunteer to `handle that`_. -.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org .. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation Highlights ========== -Bugfixes +Bugfixes -------- Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider @@ -71,7 +72,7 @@ * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. -* Generators are faster since they now skip unecessary cleanup +* Generators are faster since they now skip unnecessary cleanup * A first time contributor simplified JIT traces by adding integer bound propagation in indexing and logical operations. @@ -84,6 +85,8 @@ * Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Support for corner cases on objects with __int__ and __float__ methods + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features @@ -97,8 +100,6 @@ * Support for precompiled headers in the build process for MSVC -* Support for objects with __int__ and __float__ methods - * Tweak support of errno in cpyext (the PyPy implemenation of the capi) @@ -127,8 +128,12 @@ * A cffi-based ``numpy.random`` module is available as a branch in the numpy repository, it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in numpy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. 
+* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load + memory operations used in numpy arrays. Further work remains here in virtualizing the + alloc_raw_storage when possible. This will allow scalars to have storages but still be + virtualized when possible in loops. Cheers + The PyPy Team diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -161,3 +161,7 @@ .. branch: refactor-buffer-api Properly implement old/new buffer API for objects and start work on replacing bufferstr usage + +.. branch: issue1430 +Add a lock for unsafe calls to gethostbyname and gethostbyaddr + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,5 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0524dae88c75 +.. startrev: 0f75ad4d14ce -.. branch: reflex-support diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -705,23 +705,17 @@ def allocate_lock(self): """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" - if self.config.objspace.usemodules.thread: - # we use a sub-function to avoid putting the 'import' statement - # here, where the flow space would see it even if thread=False - return self.__allocate_lock() - else: - return dummy_lock - - def __allocate_lock(self): - from rpython.rlib.rthread import allocate_lock, error + from rpython.rlib import rthread + if not self.config.objspace.usemodules.thread: + return rthread.dummy_lock # hack: we can't have prebuilt locks if we're translating. # In this special situation we should just not lock at all # (translation is not multithreaded anyway). 
if not we_are_translated() and self.config.translating: raise CannotHaveLock() try: - return allocate_lock() - except error: + return rthread.allocate_lock() + except rthread.error: raise OperationError(self.w_RuntimeError, self.wrap("out of resources")) @@ -1765,24 +1759,6 @@ return space.getitem(w_glob, space.wrap('anonymous')) -class DummyLock(object): - def acquire(self, flag): - return True - - def release(self): - pass - - def _freeze_(self): - return True - - def __enter__(self): - pass - - def __exit__(self, *args): - pass - -dummy_lock = DummyLock() - # Table describing the regular part of the interface of object spaces, # namely all methods which only take w_ arguments and return a w_ result # (if any). diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -20,6 +20,11 @@ ast = self.ast assert isinstance(ast.__version__, str) + def test_flags(self): + skip("broken") + from copy_reg import _HEAPTYPE + assert self.ast.Module.__flags__ & _HEAPTYPE + def test_build_ast(self): ast = self.ast mod = self.get_ast("x = 4") @@ -234,19 +239,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert exc.value.args[0] == "'Num' object has no attribute 'n'" + assert "Num' object has no attribute 'n'" in exc.value.args[0] x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert exc.value.args[0] == "'Num' object has no attribute 'lineno'" + assert "Num' object has no attribute 'lineno'" in exc.value.args[0] y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert exc.value.args[0] == "'Num' object has no attribute 'foobar'" + assert "Num' object has no attribute 'foobar'" in exc.value.args[0] x = ast.Num(lineno=2) assert x.lineno == 2 @@ -260,9 +265,8 @@ raises(TypeError, ast.Num, 1, 2, lineno=0) def test_issue1680_nonseq(self): + # 
Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -303,9 +307,8 @@ assert not hasattr(mod.body[0], 'name') def test_issue1680_seq(self): + # Test deleting an attribute manually - # Test deleting an attribute manually - _ast = self.ast mod = self.get_ast("self.attr") assert isinstance(mod, _ast.Module) @@ -408,9 +411,8 @@ import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ - assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Num_notfullinit(self): import ast import copy @@ -418,7 +420,7 @@ assert num_node.n == 2 assert num_node.lineno == 2 num_node2 = copy.deepcopy(num_node) - + def test_issue1673_Num_fullinit(self): import ast import copy @@ -429,7 +431,7 @@ assert num_node.col_offset == num_node2.col_offset dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - + def test_issue1673_Str(self): import ast import copy @@ -439,4 +441,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - \ No newline at end of file diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -43,7 +43,7 @@ ) by_id = set() for entry in stats: - if entry.code == f1.func_code: + if entry.code == f1.__code__: assert len(entry.calls) == 2 for subentry in entry.calls: assert subentry.code in expected @@ -219,10 +219,10 @@ lines.remove(line) break else: - print('NOT FOUND:', pattern.rstrip('\n')) + print('NOT FOUND: %s' % pattern.rstrip('\n')) print('--- GOT ---') print(got) - print() + print('') print('--- EXPECTED ---') print(expected) assert False diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ 
b/pypy/module/_socket/__init__.py @@ -17,6 +17,8 @@ def startup(self, space): from rpython.rlib.rsocket import rsocket_startup rsocket_startup() + from pypy.module._socket.interp_func import State + space.fromcache(State).startup(space) def buildloaders(cls): from rpython.rlib import rsocket diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -43,8 +43,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. """ + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyname_ex(host) + res = rsocket.gethostbyname_ex(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -56,8 +57,9 @@ Return the true host name, a list of aliases, and a list of IP addresses, for a host. The host argument is a string giving a host name or IP number. 
""" + lock = space.fromcache(State).netdb_lock try: - res = rsocket.gethostbyaddr(host) + res = rsocket.gethostbyaddr(host, lock) except SocketError, e: raise converted_error(space, e) return common_wrapgethost(space, res) @@ -316,3 +318,10 @@ raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) rsocket.setdefaulttimeout(timeout) + +class State(object): + def __init__(self, space): + self.netdb_lock = None + + def startup(self, space): + self.netdb_lock = space.allocate_lock() diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -7,6 +7,8 @@ def setup_module(mod): if sys.platform == 'win32': py.test.skip("win32 not supported so far") + if sys.maxsize < 2 ** 31: + py.test.skip("32 bit not supported so far") err = os.system("cd '%s' && make datatypesDict.so" % currpath) if err: raise OSError("'make' failed (see stderr)") @@ -30,7 +32,7 @@ def test02_instance_data_read_access(self): """Test read access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -117,7 +119,7 @@ def test03_instance_data_write_access(self): """Test write access to instance public data and verify values""" - import cppyy, sys + import cppyy cppyy_test_data = cppyy.gbl.cppyy_test_data c = cppyy_test_data() @@ -489,14 +491,14 @@ import cppyy four_vector = cppyy.gbl.four_vector - + t1 = four_vector(1., 2., 3., -4.) t2 = four_vector(0., 0., 0., 0.) 
t3 = four_vector(t1) - + assert t1 == t3 assert t1 != t2 - + for i in range(4): assert t1[i] == t3[i] @@ -625,8 +627,8 @@ def test18_object_and_pointer_comparisons(self): """Verify object and pointer comparisons""" - - import cppyy + + import cppyy gbl = cppyy.gbl c1 = cppyy.bind_object(0, gbl.cppyy_test_data) @@ -662,11 +664,11 @@ def test19_object_validity(self): """Test object validity checking""" - + from cppyy import gbl d = gbl.cppyy_test_pod() - + assert d assert not not d diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -23,7 +23,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', - 'nditer': 'nditer.nditer', + 'nditer': 'nditer.W_NDIter', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -131,12 +131,13 @@ return dtype def get_name(self): - return self.w_box_type.name + name = self.w_box_type.name + if name.endswith('_'): + name = name[:-1] + return name def descr_get_name(self, space): name = self.get_name() - if name[-1] == '_': - name = name[:-1] if self.is_flexible() and self.elsize != 0: return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) @@ -820,7 +821,7 @@ w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { - NPY.BOOL: ['bool', 'bool8'], + NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], NPY.UBYTE: ['ubyte'], NPY.SHORT: ['short'], @@ -835,8 +836,8 @@ NPY.CFLOAT: ['csingle'], NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], - NPY.STRING: ['string', 'str'], - NPY.UNICODE: ['unicode'], + NPY.STRING: ['string_', 'str'], + NPY.UNICODE: ['unicode_'], } 
self.alternate_constructors = { NPY.BOOL: [space.w_bool], diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -492,13 +492,15 @@ w_op_dtypes=WrappedDefault(None), order=str, w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(None)) -def nditer(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, w_buffersize, order='K'): +def descr__new__(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, + w_casting, w_op_axes, w_itershape, w_buffersize, order='K'): return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, w_buffersize, order) -W_NDIter.typedef = TypeDef( - 'nditer', +W_NDIter.typedef = TypeDef('nditer', + __module__ = 'numpy', + __new__ = interp2app(descr__new__), + __iter__ = interp2app(W_NDIter.descr_iter), __getitem__ = interp2app(W_NDIter.descr_getitem), __setitem__ = interp2app(W_NDIter.descr_setitem), @@ -530,3 +532,4 @@ shape = GetSetProperty(W_NDIter.descr_get_shape), value = GetSetProperty(W_NDIter.descr_get_value), ) +W_NDIter.typedef.acceptable_as_base_class = False diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -47,6 +47,7 @@ assert d.kind == 'b' assert dtype(d) is d assert dtype('bool') is d + assert dtype('bool_') is d assert dtype('|b1') is d b = '>' if sys.byteorder == 'little' else '<' assert dtype(b + 'i4') is not dtype(b + 'i4') @@ -63,10 +64,12 @@ assert dtype(int).names is None assert dtype(int).hasobject is False assert dtype(int).subdtype is None + assert dtype(str) is dtype('string') is dtype('string_') + assert dtype(unicode) is dtype('unicode') is dtype('unicode_') assert dtype(None) is dtype(float) - for d in [dtype('i4')]: 
+ for d in [dtype('i4'), dtype('bool')]: for key in ["d[2]", "d['z']", "d[None]"]: exc = raises(KeyError, key) assert exc.value[0] == "There are no fields in dtype %s." % str(d) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -3,6 +3,19 @@ class AppTestNDIter(BaseNumpyAppTest): + def test_type(self): + import numpy as np + assert type(np.nditer) is type + assert np.nditer.__name__ == 'nditer' + assert np.nditer.__module__ == 'numpy' + try: + class Sub(np.nditer): + pass + except TypeError as e: + assert "not an acceptable base" in str(e) + else: + assert False + def test_basic(self): from numpy import arange, nditer, ndarray a = arange(6).reshape(2,3) diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestCProfile(BaseTestPyPyC): @@ -26,6 +26,10 @@ for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - assert ' call(' not in repr(loop.ops_by_id(method)) + # on 32-bit, there is f1=read_timestamp(); ...; + # f2=read_timestamp(); f3=call(llong_sub,f1,f2) + # which should turn into a single PADDQ/PSUBQ + if sys.maxint != 2147483647: + assert ' call(' not in repr(loop.ops_by_id(method)) assert ' call_may_force(' not in repr(loop.ops_by_id(method)) assert ' cond_call(' in repr(loop.ops_by_id(method)) diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -10,6 +10,7 @@ mod = __import__('pypy.module.%s' % modname, None, None, ['__doc__']) # force computation and record what we wrap 
module = mod.Module(space, W_Root()) + module.startup(space) for name in module.loaders: seeobj_w.append(module._load_lazily(space, name)) if hasattr(module, 'submodules'): diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -61,7 +61,6 @@ class AppTestTypeObject: - def test_abstract_methods(self): class X(object): pass @@ -72,6 +71,13 @@ raises(AttributeError, getattr, type, "__abstractmethods__") raises(TypeError, "int.__abstractmethods__ = ('abc', )") + def test_attribute_error(self): + class X(object): + __module__ = 'test' + x = X() + exc = raises(AttributeError, "x.a") + assert str(exc.value) == "'X' object has no attribute 'a'" + def test_call_type(self): assert type(42) is int C = type('C', (object,), {'x': lambda: 42}) @@ -410,8 +416,7 @@ assert f.__call__() == ((), {}) assert f.__call__("hello", "world") == (("hello", "world"), {}) assert f.__call__(5, bla=6) == ((5,), {"bla": 6}) - assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, - "c": 3}) + assert f.__call__(a=1, b=2, c=3) == ((), {"a": 1, "b": 2, "c": 3}) def test_multipleinheritance_fail(self): try: @@ -529,7 +534,7 @@ def test_metaclass_choice(self): """ events = [] - + class T1(type): def __new__(*args): events.append(args) @@ -552,7 +557,7 @@ assert type(C) is T1 assert type(G) is T1 """ - + def test_descr_typecheck(self): raises(TypeError,type.__dict__['__name__'].__get__,1) raises(TypeError,type.__dict__['__mro__'].__get__,1) @@ -797,7 +802,7 @@ z2 = Z2() z2.__class__ = Z1 assert z2.__class__ == Z1 - + class I(int): pass class F(float): @@ -816,13 +821,12 @@ pass i = I() - i2 = I() i.__class__ = I2 i2.__class__ = I assert i.__class__ == I2 assert i2.__class__ == I - + i3 = I3() raises(TypeError, "i3.__class__ = I2") i3.__class__ = I4 @@ -873,6 +877,12 @@ Abc.__name__ = 'Def' assert Abc.__name__ == 'Def' raises(TypeError, "Abc.__name__ = 42") + 
try: + Abc.__name__ = 'G\x00hi' + except ValueError as e: + assert str(e) == "__name__ must not contain null bytes" + else: + assert False def test_compare(self): class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -487,11 +487,12 @@ def get_module_type_name(w_self): space = w_self.space - w_mod = w_self.get_module() - if space.isinstance_w(w_mod, space.w_unicode): - mod = space.unicode_w(w_mod) - if mod != u'builtins': - return u'%s.%s' % (mod, w_self.name.decode('utf-8')) + if not w_self.is_heaptype(): + w_mod = w_self.get_module() + if space.isinstance_w(w_mod, space.w_unicode): + mod = space.unicode_w(w_mod) + if mod != u'builtins': + return u'%s.%s' % (mod, w_self.name.decode('utf-8')) return w_self.name.decode('utf-8') def getname(w_self, space): @@ -631,7 +632,10 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise oefmt(space.w_TypeError, "can't set %N.__name__", w_type) - w_type.name = space.str_w(w_value) + name = space.str_w(w_value) + if '\x00' in name: + raise oefmt(space.w_ValueError, "__name__ must not contain null bytes") + w_type.name = name def descr_get__mro__(space, w_type): w_type = _check(space, w_type) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -351,7 +351,7 @@ arg_consts = [] for i in range(op.numargs()): arg = op.getarg(i) - const = self.get_constant_box(arg) + const = self.optimizer.get_constant_box(arg) if const is None: return None arg_consts.append(const) diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -30,7 +30,7 @@ 'stdio.h', 'netdb.h', 'arpa/inet.h', - 'stdint.h', + 'stdint.h', 'errno.h', ) if _HAS_AF_PACKET: @@ -139,7 
+139,7 @@ EAI_SOCKTYPE EAI_SYSTEM IPPROTO_AH IPPROTO_BIP IPPROTO_DSTOPTS IPPROTO_EGP IPPROTO_EON IPPROTO_ESP -IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS +IPPROTO_FRAGMENT IPPROTO_GGP IPPROTO_GRE IPPROTO_HELLO IPPROTO_HOPOPTS IPPROTO_ICMPV6 IPPROTO_IDP IPPROTO_IGMP IPPROTO_IPCOMP IPPROTO_IPIP IPPROTO_IPV4 IPPROTO_IPV6 IPPROTO_MAX IPPROTO_MOBILE IPPROTO_ND IPPROTO_NONE IPPROTO_PIM IPPROTO_PUP IPPROTO_ROUTING IPPROTO_RSVP IPPROTO_TCP IPPROTO_TP @@ -174,7 +174,7 @@ SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM -SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE +SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE SO_ACCEPTCONN SO_BROADCAST SO_DEBUG SO_DONTROUTE SO_ERROR SO_EXCLUSIVEADDRUSE SO_KEEPALIVE SO_LINGER SO_OOBINLINE SO_RCVBUF SO_RCVLOWAT SO_RCVTIMEO @@ -286,7 +286,7 @@ ('nl_pid', rffi.INT), ('nl_groups', rffi.INT)], ifdef='AF_NETLINK') - + CConfig.addrinfo = platform.Struct('struct addrinfo', [('ai_flags', rffi.INT), ('ai_family', rffi.INT), diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -18,6 +18,7 @@ from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rthread import dummy_lock from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET @@ -1124,22 +1125,24 @@ paddr = h_addr_list[i] return (rffi.charp2str(hostent.c_h_name), aliases, address_list) -def gethostbyname_ex(name): - # XXX use gethostbyname_r() if available, and/or use locks if not +def gethostbyname_ex(name, lock=dummy_lock): + # XXX use gethostbyname_r() if available instead of locks addr = gethostbyname(name) - hostent = _c.gethostbyname(name) - return gethost_common(name, hostent, addr) + with lock: + hostent = _c.gethostbyname(name) + return 
gethost_common(name, hostent, addr) -def gethostbyaddr(ip): - # XXX use gethostbyaddr_r() if available, and/or use locks if not +def gethostbyaddr(ip, lock=dummy_lock): + # XXX use gethostbyaddr_r() if available, instead of locks addr = makeipaddr(ip) assert isinstance(addr, IPAddress) - p, size = addr.lock_in_addr() - try: - hostent = _c.gethostbyaddr(p, size, addr.family) - finally: - addr.unlock() - return gethost_common(ip, hostent, addr) + with lock: + p, size = addr.lock_in_addr() + try: + hostent = _c.gethostbyaddr(p, size, addr.family) + finally: + addr.unlock() + return gethost_common(ip, hostent, addr) def getaddrinfo(host, port_or_service, family=AF_UNSPEC, socktype=0, proto=0, flags=0, diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,4 +1,3 @@ - from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir @@ -113,6 +112,24 @@ assert len(y) == 0 return rffi.cast(lltype.Signed, ll_start_new_thread(x)) +class DummyLock(object): + def acquire(self, flag): + return True + + def release(self): + pass + + def _freeze_(self): + return True + + def __enter__(self): + pass + + def __exit__(self, *args): + pass + +dummy_lock = DummyLock() + class Lock(object): """ Container for low-level implementation of a lock object diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -3,6 +3,7 @@ from rpython.rlib.rsocket import * import socket as cpy_socket + def setup_module(mod): rsocket_startup() @@ -61,6 +62,43 @@ py.test.fail("could not find the localhost address in %r" % (address_list,)) +def test_thread_safe_gethostbyname_ex(): + import threading + nthreads = 10 + domain = 'google.com' + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def 
lookup_name(i): + name, aliases, address_list = gethostbyname_ex(domain, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_name, args=[i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + +def test_thread_safe_gethostbyaddr(): + import threading + nthreads = 10 + ip = '8.8.8.8' + domain = gethostbyaddr(ip)[0] + result = [0] * nthreads + threads = [None] * nthreads + lock = threading.Lock() + def lookup_addr(ip, i): + name, aliases, address_list = gethostbyaddr(ip, lock) + if name == domain: + result[i] += 1 + for i in range(nthreads): + threads[i] = threading.Thread(target = lookup_addr, args=[ip, i]) + threads[i].start() + for i in range(nthreads): + threads[i].join() + assert sum(result) == nthreads + def test_gethostbyaddr(): try: cpy_socket.gethostbyaddr("::1") From noreply at buildbot.pypy.org Sun May 4 00:04:21 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 May 2014 00:04:21 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt error messages to py3, kill t# Message-ID: <20140503220421.6BF061C0299@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71242:103e34d2187d Date: 2014-05-03 14:53 -0700 http://bitbucket.org/pypy/pypy/changeset/103e34d2187d/ Log: adapt error messages to py3, kill t# diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1424,7 +1424,7 @@ try: return w_obj.readbuf_w(self) except TypeError: - self._getarg_error("string or buffer", w_obj) + self._getarg_error("bytes or buffer", w_obj) elif code == 's#': if self.isinstance_w(w_obj, self.w_str): return w_obj.bytes_w(self) @@ -1433,7 +1433,7 @@ try: return w_obj.readbuf_w(self).as_str() except TypeError: - self._getarg_error("string or read-only buffer", w_obj) + self._getarg_error("bytes or read-only buffer", w_obj) elif 
code == 'w*': try: try: @@ -1446,11 +1446,6 @@ return w_obj.writebuf_w(self) except TypeError: self._getarg_error("read-write buffer", w_obj) - elif code == 't#': - try: - return w_obj.charbuf_w(self) - except TypeError: - self._getarg_error("string or read-only character buffer", w_obj) else: assert False @@ -1478,7 +1473,8 @@ try: buf = w_obj.readbuf_w(self) except TypeError: - self._getarg_error("string or buffer", w_obj) + raise oefmt(self.w_TypeError, + "'%T' does not support the buffer interface", w_obj) else: return buf.as_str() From noreply at buildbot.pypy.org Sun May 4 00:04:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 May 2014 00:04:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Buffers now require a readonly attribute Message-ID: <20140503220422.A97631C0299@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71243:9b567d9ab02f Date: 2014-05-03 15:00 -0700 http://bitbucket.org/pypy/pypy/changeset/9b567d9ab02f/ Log: Buffers now require a readonly attribute diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -16,6 +16,7 @@ def __init__(self, w_bytesio): self.w_bytesio = w_bytesio + self.readonly = False def getlength(self): return int(self.w_bytesio.getsize()) From noreply at buildbot.pypy.org Sun May 4 00:04:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 May 2014 00:04:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt memoryview related tests to py3 Message-ID: <20140503220424.312111C0299@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71244:051596edf5c1 Date: 2014-05-03 15:01 -0700 http://bitbucket.org/pypy/pypy/changeset/051596edf5c1/ Log: adapt memoryview related tests to py3 diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ 
b/pypy/module/__builtin__/test/test_builtin.py @@ -487,13 +487,11 @@ def test_compile(self): co = compile('1+2', '?', 'eval') assert eval(co) == 3 - co = compile(buffer('1+2'), '?', 'eval') + co = compile(memoryview(b'1+2'), '?', 'eval') assert eval(co) == 3 exc = raises(TypeError, compile, chr(0), '?', 'eval') assert str(exc.value) == "compile() expected string without null bytes" - exc = raises(TypeError, compile, unichr(0), '?', 'eval') - assert str(exc.value) == "compile() expected string without null bytes" - exc = raises(TypeError, compile, memoryview('1+2'), '?', 'eval') + exc = raises(TypeError, compile, memoryview(b'1+2'), '?', 'eval') assert str(exc.value) == "expected a readable buffer object" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py --- a/pypy/module/__pypy__/test/test_bytebuffer.py +++ b/pypy/module/__pypy__/test/test_bytebuffer.py @@ -12,18 +12,16 @@ assert b[-1] == b'*' assert b[-2] == b'-' assert b[-3] == b'+' - exc = raises(TypeError, "b[3] = b'abc'") - assert str(exc.value) == "right operand must be a single byte" - exc = raises(TypeError, "b[3:5] = b'abc'") - assert str(exc.value) == "right operand length must match slice length" - exc = raises(TypeError, "b[3:7:2] = b'abc'") - assert str(exc.value) == "right operand length must match slice length" + exc = raises(ValueError, "b[3] = b'abc'") + assert str(exc.value) == "cannot modify size of memoryview object" + exc = raises(ValueError, "b[3:5] = b'abc'") + assert str(exc.value) == "cannot modify size of memoryview object" + raises(NotImplementedError, "b[3:7:2] = b'abc'") b = bytebuffer(10) b[1:3] = b'xy' assert bytes(b) == b"\x00xy" + b"\x00" * 7 - b[4:8:2] = b'zw' - assert bytes(b) == b"\x00xy\x00z\x00w" + b"\x00" * 3 - r = str(buffer(u'#')) - b[6:6+len(r)] = u'#' - assert str(b[:6+len(r)]) == "\x00xy\x00z\x00" + r + # XXX: 
supported in 3.3 + raises(NotImplementedError, "b[4:8:2] = b'zw'") + #b[4:8:2] = b'zw' + #assert bytes(b) == b"\x00xy\x00z\x00w" + b"\x00" * 3 diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -139,12 +139,11 @@ raw = _io.FileIO(self.tmpfile) f = _io.BufferedReader(raw) assert f.readinto(a) == 5 + f.seek(0) + m = memoryview(bytearray(b"hello")) + assert f.readinto(m) == 5 exc = raises(TypeError, f.readinto, u"hello") - assert str(exc.value) == "cannot use unicode as modifiable buffer" - exc = raises(TypeError, f.readinto, buffer(b"hello")) - assert str(exc.value) == "must be read-write buffer, not buffer" - exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) - assert str(exc.value) == "must be read-write buffer, not buffer" + assert str(exc.value) == "must be read-write buffer, not str" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -44,7 +44,7 @@ assert f.write(b"") == 0 assert f.write(b"hello") == 5 exc = raises(TypeError, f.write, u"lo") - assert str(exc.value) == "'unicode' does not have the buffer interface" + assert str(exc.value) == "'str' does not have the buffer interface" import gc; gc.collect() assert f.getvalue() == b"hello" f.close() @@ -104,12 +104,11 @@ a2 = bytearray(b'testing') assert b.readinto(a1) == 1 assert b.readinto(a2) == 4 + b.seek(0) + m = memoryview(bytearray(b"world")) + assert b.readinto(m) == 5 exc = raises(TypeError, b.readinto, u"hello") - assert str(exc.value) == "cannot use unicode as modifiable buffer" - exc = raises(TypeError, b.readinto, buffer(b"hello")) - assert str(exc.value) == "must be read-write buffer, not 
buffer" - exc = raises(TypeError, b.readinto, buffer(bytearray("hello"))) - assert str(exc.value) == "must be read-write buffer, not buffer" + assert str(exc.value) == "must be read-write buffer, not str" exc = raises(TypeError, b.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" b.close() diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -135,12 +135,11 @@ a = bytearray(b'x' * 10) f = _io.FileIO(self.tmpfile, 'r+') assert f.readinto(a) == 10 + f.seek(0) + m = memoryview(bytearray(b"helloworld")) + assert f.readinto(m) == 10 exc = raises(TypeError, f.readinto, u"hello") - assert str(exc.value) == "cannot use unicode as modifiable buffer" - exc = raises(TypeError, f.readinto, buffer(b"hello")) - assert str(exc.value) == "must be read-write buffer, not buffer" - exc = raises(TypeError, f.readinto, buffer(bytearray("hello"))) - assert str(exc.value) == "must be read-write buffer, not buffer" + assert str(exc.value) == "must be read-write buffer, not str" exc = raises(TypeError, f.readinto, memoryview(b"hello")) assert str(exc.value) == "must be read-write buffer, not memoryview" f.close() diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -540,7 +540,7 @@ except _socket.gaierror as ex: skip("GAIError - probably no connection: %s" % str(ex.args)) exc = raises(TypeError, s.send, None) - assert str(exc.value) == "must be string or buffer, not None" + assert str(exc.value) == "'NoneType' does not support the buffer interface" assert s.send(memoryview(b'')) == 0 assert s.sendall(memoryview(b'')) is None exc = raises(TypeError, s.send, '') diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- 
a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -133,11 +133,12 @@ def test_fromstring(self): a = self.array('b') a.fromstring('Hi!') - assert a[0] == b'H' and a[1] == b'i' and a[2] == b'!' and len(a) == 3 + assert len(a) == 3 + assert a[0] == ord(b'H') and a[1] == ord(b'i') and a[2] == ord(b'!') a = self.array('b') - exc = raises(TypeError, a.fromstring, memoryview(b'xyz')) - assert str(exc.value) == "must be string or read-only buffer, not memoryview" - assert a[0] == b'x' and a[1] == b'y' and a[2] == b'z' and len(a) == 3 + a.fromstring(memoryview(b'xyz')) + assert len(a) == 3 + assert a[0] == ord(b'x') and a[1] == ord(b'y') and a[2] == ord(b'z') a = self.array('b') a.fromstring('') assert not len(a) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -25,8 +25,10 @@ s = marshal.dumps(array.array('b', b'asd')) t = marshal.loads(s) assert type(t) is bytes and t == b'asd' - exc = raises(ValueError, marshal.dumps, memoryview(b'asd')) - assert str(exc.value) == "unmarshallable object" + + s = marshal.dumps(memoryview(b'asd')) + t = marshal.loads(s) + assert type(t) is bytes and t == b'asd' def test_unmarshal_evil_long(self): import marshal From noreply at buildbot.pypy.org Sun May 4 00:04:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 4 May 2014 00:04:25 +0200 (CEST) Subject: [pypy-commit] pypy py3k: refix: disallow unicode Message-ID: <20140503220425.8D4691C0299@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71245:ffea0e1ff052 Date: 2014-05-03 15:01 -0700 http://bitbucket.org/pypy/pypy/changeset/ffea0e1ff052/ Log: refix: disallow unicode diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -711,7 +711,7 @@ 
def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.getarg_w('s*', w_data).as_str() + data = space.bufferstr_w(w_data) size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -340,7 +340,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.getarg_w('s*', w_data).as_str() + data = space.bufferstr_w(w_data) try: n = os.write(self.fd, data) From noreply at buildbot.pypy.org Sun May 4 00:35:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 00:35:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: kill off old buffer interface and its usage Message-ID: <20140503223545.A80461C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71246:7bcdbe1b48bd Date: 2014-05-03 18:35 -0400 http://bitbucket.org/pypy/pypy/changeset/7bcdbe1b48bd/ Log: kill off old buffer interface and its usage diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -202,30 +202,6 @@ return w_result.buffer_w(space, flags) raise TypeError - def readbuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.readbuf_w(space) - return self.buffer_w(space, space.BUF_SIMPLE) - - def writebuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.writebuf_w(space) - return self.buffer_w(space, space.BUF_WRITABLE) - - def charbuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') 
- if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.charbuf_w(space) - return self.buffer_w(space, space.BUF_SIMPLE).as_str() - def bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -1373,31 +1349,33 @@ return w_obj.buffer_w(self, flags) except TypeError: raise oefmt(self.w_TypeError, - "'%T' does not have the buffer interface", w_obj) + "'%T' does not support the buffer interface", w_obj) def readbuf_w(self, w_obj): # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) try: - return w_obj.readbuf_w(self) + return w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a readable buffer object") + "expected an object with a buffer interface") def writebuf_w(self, w_obj): # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) try: - return w_obj.writebuf_w(self) + return w_obj.buffer_w(self, self.BUF_WRITABLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a writeable buffer object") + "expected an object with a writable buffer interface") def charbuf_w(self, w_obj): # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) try: - return w_obj.charbuf_w(self) + buf = w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a character buffer object") + "expected an object with a buffer interface") + else: + return buf.as_str() def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): @@ -1414,15 +1392,11 @@ code = 's*' if code == 's*': if self.isinstance_w(w_obj, self.w_str): - return w_obj.readbuf_w(self) + return StringBuffer(w_obj.bytes_w) if self.isinstance_w(w_obj, self.w_unicode): return StringBuffer(w_obj.identifier_w(self)) try: - return w_obj.buffer_w(self, 0) - except TypeError: - pass - try: - return w_obj.readbuf_w(self) + return w_obj.buffer_w(self, self.BUF_SIMPLE) except 
TypeError: self._getarg_error("bytes or buffer", w_obj) elif code == 's#': @@ -1431,7 +1405,7 @@ if self.isinstance_w(w_obj, self.w_unicode): return w_obj.identifier_w(self) try: - return w_obj.readbuf_w(self).as_str() + return w_obj.buffer_w(self, self.BUF_SIMPLE).as_str() except TypeError: self._getarg_error("bytes or read-only buffer", w_obj) elif code == 'w*': @@ -1439,13 +1413,10 @@ try: return w_obj.buffer_w(self, self.BUF_WRITABLE) except OperationError: - self._getarg_error("read-write buffer", w_obj) + pass except TypeError: pass - try: - return w_obj.writebuf_w(self) - except TypeError: - self._getarg_error("read-write buffer", w_obj) + self._getarg_error("read-write buffer", w_obj) else: assert False @@ -1467,12 +1438,6 @@ try: buf = w_obj.buffer_w(self, 0) except TypeError: - pass - else: - return buf.as_str() - try: - buf = w_obj.readbuf_w(self) - except TypeError: raise oefmt(self.w_TypeError, "'%T' does not support the buffer interface", w_obj) else: diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -363,12 +363,6 @@ def buffer_w(self, space, flags): return RawFFIBuffer(self) - def readbuf_w(self, space): - return RawFFIBuffer(self) - - def writebuf_w(self, space): - return RawFFIBuffer(self) - def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -141,12 +141,6 @@ def buffer_w(self, space, flags): return ArrayBuffer(self, False) - def readbuf_w(self, space): - return ArrayBuffer(self, True) - - def writebuf_w(self, space): - return ArrayBuffer(self, False) - def descr_append(self, space, w_x): """ append(x) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ 
b/pypy/module/micronumpy/boxes.py @@ -340,12 +340,6 @@ def buffer_w(self, space, flags): return self.descr_ravel(space).buffer_w(space, flags) - def readbuf_w(self, space): - return self.descr_ravel(space).readbuf_w(space) - - def charbuf_w(self, space): - return self.descr_ravel(space).charbuf_w(space) - def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -621,15 +621,6 @@ def buffer_w(self, space, flags): return self.implementation.get_buffer(space, True) - def readbuf_w(self, space): - return self.implementation.get_buffer(space, True) - - def writebuf_w(self, space): - return self.implementation.get_buffer(space, False) - - def charbuf_w(self, space): - return self.implementation.get_buffer(space, True).as_str() - def descr_get_data(self, space): return space.newbuffer(self.implementation.get_buffer(space, False)) diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,9 +17,9 @@ self.space = space self.mmap = mmap_obj - def readbuf_w(self, space): + def buffer_w(self, space, flags): self.check_valid() - return MMapBuffer(self.space, self.mmap, True) + return MMapBuffer(self.space, self.mmap, flags & space.BUF_WRITABLE) def close(self): self.mmap.close() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -31,15 +31,6 @@ def buffer_w(self, space, flags): return BytearrayBuffer(self.data, False) - def readbuf_w(self, space): - return BytearrayBuffer(self.data, True) - - def writebuf_w(self, space): - return BytearrayBuffer(self.data, False) - - def charbuf_w(self, space): - return ''.join(self.data) - def _new(self, value): return 
W_BytearrayObject(_make_data(value)) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -402,13 +402,6 @@ space.check_buf_flags(flags, True) return StringBuffer(self._value) - def readbuf_w(self, space): - return StringBuffer(self._value) - - def writebuf_w(self, space): - raise OperationError(space.w_TypeError, space.wrap( - "Cannot use string as modifiable buffer")) - def listview_int(self): return _create_list_from_bytes(self._value) @@ -440,7 +433,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return space.charbuf_w(w_other) + return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() def _chr(self, char): assert len(char) == 1 diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -39,9 +39,6 @@ def buffer_w(self, space, flags): return StringBuffer(self.force()) - def readbuf_w(self, space): - return StringBuffer(self.force()) - def descr_len(self, space): return space.wrap(self.length) From noreply at buildbot.pypy.org Sun May 4 00:45:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 00:45:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: redefine this Message-ID: <20140503224522.16D6D1C0299@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71247:e9bdda4d9ef2 Date: 2014-05-03 18:44 -0400 http://bitbucket.org/pypy/pypy/changeset/e9bdda4d9ef2/ Log: redefine this diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -50,7 +50,8 @@ raise oefmt(space.w_IndexError, "bytearray index out of range") return space.wrap(ord(character)) - _val = charbuf_w + def _val(self, space): + return ''.join(self.data) @staticmethod def _op_val(space, w_other): 
From noreply at buildbot.pypy.org Sun May 4 00:45:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 00:45:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: add getarg('y*'), use instead of bufferstr_w here Message-ID: <20140503224523.6D2DE1C0299@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71248:3360556c7eec Date: 2014-05-03 18:44 -0400 http://bitbucket.org/pypy/pypy/changeset/3360556c7eec/ Log: add getarg('y*'), use instead of bufferstr_w here diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1417,6 +1417,11 @@ except TypeError: pass self._getarg_error("read-write buffer", w_obj) + elif code == 'y*': + try: + return w_obj.buffer_w(self, self.BUF_SIMPLE) + except TypeError: + self._getarg_error("bytes or buffer", w_obj) else: assert False diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -711,7 +711,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('y*', w_data).as_str() size = len(data) with self.lock: diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -340,7 +340,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('y*', w_data).as_str() try: n = os.write(self.fd, data) From noreply at buildbot.pypy.org Sun May 4 00:57:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 00:57:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: kill bufferstr_or_u_w Message-ID: <20140503225727.595E41C00B9@cobra.cs.uni-duesseldorf.de> Author: 
Brian Kearns Branch: py3k Changeset: r71249:86e1c15b52c6 Date: 2014-05-03 18:55 -0400 http://bitbucket.org/pypy/pypy/changeset/86e1c15b52c6/ Log: kill bufferstr_or_u_w diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1392,7 +1392,7 @@ code = 's*' if code == 's*': if self.isinstance_w(w_obj, self.w_str): - return StringBuffer(w_obj.bytes_w) + return StringBuffer(w_obj.bytes_w(self)) if self.isinstance_w(w_obj, self.w_unicode): return StringBuffer(w_obj.identifier_w(self)) try: @@ -1448,17 +1448,6 @@ else: return buf.as_str() - def bufferstr_or_u_w(self, w_obj): - """Returns an interp-level str, directly if possible. - - Accepts unicode or any type supporting the buffer - interface. Unicode objects will be encoded to the default - encoding (UTF-8) - """ - if self.isinstance_w(w_obj, self.w_unicode): - return w_obj.identifier_w(self) - return self.bufferstr_w(w_obj) - def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): return None diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -129,9 +129,6 @@ def visit_bufferstr(self, el, app_sig): self.checked_space_method(el, app_sig) - def visit_bufferstr_or_u(self, el, app_sig): - self.checked_space_method(el, app_sig) - def visit_str_or_None(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -251,9 +248,6 @@ def visit_bufferstr(self, typ): self.run_args.append("space.bufferstr_w(%s)" % (self.scopenext(),)) - def visit_bufferstr_or_u(self, typ): - self.run_args.append("space.bufferstr_or_u_w(%s)" % (self.scopenext(),)) - def visit_str_or_None(self, typ): self.run_args.append("space.str_or_None_w(%s)" % (self.scopenext(),)) @@ -397,9 +391,6 @@ def visit_bufferstr(self, typ): self.unwrap.append("space.bufferstr_w(%s)" % (self.nextarg(),)) - def visit_bufferstr_or_u(self, typ): - 
self.unwrap.append("space.bufferstr_or_u_w(%s)" % (self.nextarg(),)) - def visit_str_or_None(self, typ): self.unwrap.append("space.str_or_None_w(%s)" % (self.nextarg(),)) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -769,9 +769,9 @@ return -1 return space.int_w(w_code) - at unwrap_spec(string='bufferstr_or_u', errors='str_or_None', - w_final=WrappedDefault(False)) -def unicode_escape_decode(space, string, errors="strict", w_final=None): + at unwrap_spec(errors='str_or_None', w_final=WrappedDefault(False)) +def unicode_escape_decode(space, w_string, errors="strict", w_final=None): + string = space.getarg_w('s*', w_string).as_str() if errors is None: errors = 'strict' final = space.is_true(w_final) @@ -789,9 +789,9 @@ # ____________________________________________________________ # Raw Unicode escape (accepts bytes or str) - at unwrap_spec(string='bufferstr_or_u', errors='str_or_None', - w_final=WrappedDefault(False)) -def raw_unicode_escape_decode(space, string, errors="strict", w_final=None): + at unwrap_spec(errors='str_or_None', w_final=WrappedDefault(False)) +def raw_unicode_escape_decode(space, w_string, errors="strict", w_final=None): + string = space.getarg_w('s*', w_string).as_str() if errors is None: errors = 'strict' final = space.is_true(w_final) @@ -828,14 +828,16 @@ # support for the "string escape" translation # This is a bytes-to bytes transformation - at unwrap_spec(data="bufferstr", errors='str_or_None') -def escape_encode(space, data, errors='strict'): + at unwrap_spec(errors='str_or_None') +def escape_encode(space, w_data, errors='strict'): + data = space.bytes_w(w_data) from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, False) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) - at unwrap_spec(data='bufferstr_or_u', errors='str_or_None') -def 
escape_decode(space, data, errors='strict'): + at unwrap_spec(errors='str_or_None') +def escape_decode(space, w_data, errors='strict'): + data = space.getarg_w('s#', w_data) from pypy.interpreter.pyparser.parsestring import PyString_DecodeEscape result = PyString_DecodeEscape(space, data, errors, None) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) From noreply at buildbot.pypy.org Sun May 4 02:20:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 May 2014 02:20:12 +0200 (CEST) Subject: [pypy-commit] pypy default: modernise raise syntax Message-ID: <20140504002012.599951D2B94@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71250:65b44c3e6ab8 Date: 2014-05-04 01:19 +0100 http://bitbucket.org/pypy/pypy/changeset/65b44c3e6ab8/ Log: modernise raise syntax diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -353,7 +353,7 @@ result = description.FunctionDesc(self, pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: - raise Exception, "ClassDesc for object not supported" + raise Exception("ClassDesc for object not supported") if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types result = self.getfrozen(pyobj) else: @@ -591,7 +591,7 @@ for name, value in dict.iteritems(): if value is func: return cls, name - raise Exception, "could not match bound-method to attribute name: %r" % (boundmeth,) + raise Exception("could not match bound-method to attribute name: %r" % (boundmeth,)) def ishashable(x): try: diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -65,14 +65,14 @@ s_start, s_stop = args[:2] s_step = args[2] else: - raise Exception, "range() takes 1 to 3 arguments" + raise Exception("range() takes 1 to 3 arguments") empty = False # so far if not s_step.is_constant(): 
step = 0 # this case signals a variable step else: step = s_step.const if step == 0: - raise Exception, "range() with step zero" + raise Exception("range() with step zero") if s_start.is_constant() and s_stop.is_constant(): try: if len(xrange(s_start.const, s_stop.const, step)) == 0: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -394,7 +394,7 @@ return SomePBC([subdef.classdesc for subdef in self.getallsubdefs()]) def _freeze_(self): - raise Exception, "ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly" + raise Exception("ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly") # ____________________________________________________________ diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -30,7 +30,7 @@ except (KeyboardInterrupt, SystemExit): raise except: - raise Exception, "broken specialize directive parms: %s" % directive + raise Exception("broken specialize directive parms: %s" % directive) name = name.replace(':', '__') try: specializer = getattr(pol, name) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1435,7 +1435,7 @@ elif a==2: raise X(1) elif a==3: - raise X,4 + raise X(4) else: try: l[0] @@ -3628,7 +3628,7 @@ def f(): e = OverflowError() lle = cast_instance_to_base_ptr(e) - raise Exception, lle + raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() py.test.raises(AssertionError, a.build_types, f, []) From noreply at buildbot.pypy.org Sun May 4 02:33:31 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 May 2014 02:33:31 +0200 (CEST) Subject: 
[pypy-commit] pypy default: modernise raise syntax Message-ID: <20140504003331.76E571D2B94@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71251:f66d67cc5297 Date: 2014-05-04 01:33 +0100 http://bitbucket.org/pypy/pypy/changeset/f66d67cc5297/ Log: modernise raise syntax diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -593,7 +593,7 @@ def can_enter_jit(_self, **livevars): if _self.autoreds: - raise TypeError, "Cannot call can_enter_jit on a driver with reds='auto'" + raise TypeError("Cannot call can_enter_jit on a driver with reds='auto'") # special-cased by ExtRegistryEntry if _self.check_untranslated: _self._check_arguments(livevars, False) diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -109,11 +109,11 @@ def _check_type(TYPE): if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind != 'raw': - raise TypeError, "Can only push raw values to C, not 'gc'" + raise TypeError("Can only push raw values to C, not 'gc'") # XXX probably we should recursively check for struct fields here, # lets just ignore that for now if isinstance(TYPE.TO, lltype.Array) and 'nolength' not in TYPE.TO._hints: - raise TypeError, "Can only push to C arrays without length info" + raise TypeError("Can only push to C arrays without length info") class ArgChain(object): @@ -136,7 +136,7 @@ elif TYPE is rffi.FLOAT: cls = SingleFloatArg else: - raise TypeError, 'Unsupported argument type: %s' % TYPE + raise TypeError('Unsupported argument type: %s' % TYPE) self._append(cls(val)) return self @@ -247,8 +247,8 @@ # assuming that argchain is completely virtual. 
self = jit.promote(self) if argchain.numargs != len(self.argtypes): - raise TypeError, 'Wrong number of arguments: %d expected, got %d' %\ - (len(self.argtypes), argchain.numargs) + raise TypeError('Wrong number of arguments: %d expected, got %d' % + (len(self.argtypes), argchain.numargs)) ll_args = self._prepare() i = 0 arg = argchain.first @@ -273,7 +273,7 @@ elif RESULT is lltype.Void: return self._do_call_void(self.funcsym, ll_args) else: - raise TypeError, 'Unsupported result type: %s' % RESULT + raise TypeError('Unsupported result type: %s' % RESULT) # return rffi.cast(RESULT, res) @@ -430,7 +430,7 @@ def getpointer_by_ordinal(self, name, argtypes, restype, flags=FUNCFLAG_CDECL): - return Func('by_ordinal', argtypes, restype, + return Func('by_ordinal', argtypes, restype, dlsym_byordinal(self.lib, name), flags=flags, keepalive=self) def getaddressindll(self, name): diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -122,7 +122,7 @@ """ typecheck = kwds.pop('typecheck', True) if types_ and kwds: - raise TypeError, 'Cannot mix positional arguments and keywords' + raise TypeError('Cannot mix positional arguments and keywords') if not typecheck: def decorator(f): @@ -177,7 +177,7 @@ if not s_expected.contains(s_argtype): msg = "%s argument %r must be of type %s" % ( f.func_name, srcargs[i], expected_type) - raise TypeError, msg + raise TypeError(msg) # template = """ def {name}({arglist}): @@ -576,7 +576,7 @@ # ____________________________________________________________ def hlinvoke(repr, llcallable, *args): - raise TypeError, "hlinvoke is meant to be rtyped and not called direclty" + raise TypeError("hlinvoke is meant to be rtyped and not called direclty") def invoke_around_extcall(before, after): """Call before() before any external function call, and after() after. 
diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -173,7 +173,7 @@ if type(r) is long and not is_valid_int(r): # checks only if applicable to r's type. # this happens in the garbage collector. - raise OverflowError, "signed integer expression did overflow" + raise OverflowError("signed integer expression did overflow") return r # Strange things happening for float to int on 64 bit: @@ -213,7 +213,7 @@ return other_type if self_type.SIGNED == other_type.SIGNED: return build_int(None, self_type.SIGNED, max(self_type.BITS, other_type.BITS)) - raise AssertionError, "Merging these types (%s, %s) is not supported" % (self_type, other_type) + raise AssertionError("Merging these types (%s, %s) is not supported" % (self_type, other_type)) def signedtype(t): if t in (bool, int, long): diff --git a/rpython/rlib/rsre/rpy/sre_compile.py b/rpython/rlib/rsre/rpy/sre_compile.py --- a/rpython/rlib/rsre/rpy/sre_compile.py +++ b/rpython/rlib/rsre/rpy/sre_compile.py @@ -63,7 +63,7 @@ emit(OPCODES[ANY]) elif op in REPEATING_CODES: if flags & SRE_FLAG_TEMPLATE: - raise error, "internal: unsupported template operator" + raise error("internal: unsupported template operator") emit(OPCODES[REPEAT]) skip = _len(code); emit(0) emit(av[0]) @@ -112,7 +112,7 @@ else: lo, hi = av[1].getwidth() if lo != hi: - raise error, "look-behind requires fixed-width pattern" + raise error("look-behind requires fixed-width pattern") emit(lo) # look behind _compile(code, av[1], flags) emit(OPCODES[SUCCESS]) @@ -173,7 +173,7 @@ else: code[skipyes] = _len(code) - skipyes + 1 else: - raise ValueError, ("unsupported operand type", op) + raise ValueError("unsupported operand type", op) def _compile_charset(charset, flags, code, fixup=None): # compile charset subprogram @@ -201,7 +201,7 @@ else: emit(CHCODES[av]) else: - raise error, "internal: unsupported set operator" + raise error("internal: unsupported set operator") 
emit(OPCODES[FAILURE]) def _optimize_charset(charset, fixup): diff --git a/rpython/rlib/rsre/rpy/sre_parse.py b/rpython/rlib/rsre/rpy/sre_parse.py --- a/rpython/rlib/rsre/rpy/sre_parse.py +++ b/rpython/rlib/rsre/rpy/sre_parse.py @@ -75,7 +75,7 @@ if name is not None: ogid = self.groupdict.get(name, None) if ogid is not None: - raise error, ("redefinition of group name %s as group %d; " + raise error("redefinition of group name %s as group %d; " "was group %d" % (repr(name), gid, ogid)) self.groupdict[name] = gid self.open.append(gid) @@ -188,7 +188,7 @@ try: c = self.string[self.index + 1] except IndexError: - raise error, "bogus escape (end of line)" + raise error("bogus escape (end of line)") char = char + c self.index = self.index + len(char) self.next = char @@ -238,7 +238,7 @@ escape = escape + source.get() escape = escape[2:] if len(escape) != 2: - raise error, "bogus escape: %s" % repr("\\" + escape) + raise error("bogus escape: %s" % repr("\\" + escape)) return LITERAL, int(escape, 16) & 0xff elif c in OCTDIGITS: # octal escape (up to three digits) @@ -247,12 +247,12 @@ escape = escape[1:] return LITERAL, int(escape, 8) & 0xff elif c in DIGITS: - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) def _escape(source, escape, state): # handle escape code in expression @@ -289,14 +289,14 @@ group = int(escape[1:]) if group < state.groups: if not state.checkgroup(group): - raise error, "cannot refer to open group" + raise error("cannot refer to open group") return GROUPREF, group raise ValueError if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) def _parse_sub(source, state, nested=1): # parse an alternation: a|b|c @@ -313,7 
+313,7 @@ if not source.next or sourcematch(")", 0): break else: - raise error, "pattern not properly closed" + raise error("pattern not properly closed") if len(items) == 1: return items[0] @@ -362,11 +362,11 @@ if source.match("|"): item_no = _parse(source, state) if source.match("|"): - raise error, "conditional backref with more than two branches" + raise error("conditional backref with more than two branches") else: item_no = None if source.next and not source.match(")", 0): - raise error, "pattern not properly closed" + raise error("pattern not properly closed") subpattern = SubPattern(state) subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) return subpattern @@ -431,7 +431,7 @@ elif this: code1 = LITERAL, ord(this) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") if sourcematch("-"): # potential range this = sourceget() @@ -447,14 +447,14 @@ else: code2 = LITERAL, ord(this) if code1[0] != LITERAL or code2[0] != LITERAL: - raise error, "bad character range" + raise error("bad character range") lo = code1[1] hi = code2[1] if hi < lo: - raise error, "bad character range" + raise error("bad character range") setappend((RANGE, (lo, hi))) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") else: if code1[0] is IN: code1 = code1[1][0] @@ -507,16 +507,16 @@ if max < min: raise error("bad repeat interval") else: - raise error, "not supported" + raise error("not supported") # figure out which item to repeat if subpattern: item = subpattern[-1:] else: item = None if not item or (_len(item) == 1 and item[0][0] == AT): - raise error, "nothing to repeat" + raise error("nothing to repeat") if item[0][0] in REPEATCODES: - raise error, "multiple repeat" + raise error("multiple repeat") if sourcematch("?"): subpattern[-1] = (MIN_REPEAT, (min, max, item)) else: @@ -540,7 +540,7 @@ while 1: char = sourceget() if char is None: - raise 
error, "unterminated name" + raise error("unterminated name") if char == ">": break name = name + char @@ -556,7 +556,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break name = name + char @@ -567,14 +567,14 @@ "%r" % name) gid = state.groupdict.get(name) if gid is None: - raise error, "unknown group name" + raise error("unknown group name") subpatternappend((GROUPREF, gid)) continue else: char = sourceget() if char is None: - raise error, "unexpected end of pattern" - raise error, "unknown specifier: ?P%s" % char + raise error("unexpected end of pattern") + raise error("unknown specifier: ?P%s" % char) elif sourcematch(":"): # non-capturing group group = 2 @@ -585,7 +585,7 @@ break sourceget() if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") continue elif source.next in ASSERTCHARS: # lookahead assertions @@ -593,12 +593,12 @@ dir = 1 if char == "<": if source.next not in LOOKBEHINDASSERTCHARS: - raise error, "syntax error" + raise error("syntax error") dir = -1 # lookbehind char = sourceget() p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if char == "=": subpatternappend((ASSERT, (dir, p))) else: @@ -610,7 +610,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break condname = condname + char @@ -620,16 +620,16 @@ if isname(condname): condgroup = state.groupdict.get(condname) if condgroup is None: - raise error, "unknown group name" + raise error("unknown group name") else: try: condgroup = int(condname) except ValueError: - raise error, "bad character in group name" + raise error("bad character in group name") else: # flags if not source.next in FLAGS: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") while 
source.next in FLAGS: state.flags = state.flags | FLAGS[sourceget()] if group: @@ -644,7 +644,7 @@ else: p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if group is not None: state.closegroup(group) subpatternappend((SUBPATTERN, (group, p))) @@ -652,10 +652,10 @@ while 1: char = sourceget() if char is None: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") if char == ")": break - raise error, "unknown extension" + raise error("unknown extension") elif this == "^": subpatternappend((AT, AT_BEGINNING)) @@ -668,7 +668,7 @@ subpatternappend(code) else: - raise error, "parser error" + raise error("parser error") return subpattern @@ -686,9 +686,9 @@ tail = source.get() if tail == ")": - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") elif tail: - raise error, "bogus characters at end of regular expression" + raise error("bogus characters at end of regular expression") if flags & SRE_FLAG_DEBUG: p.dump() @@ -730,23 +730,23 @@ while 1: char = sget() if char is None: - raise error, "unterminated group name" + raise error("unterminated group name") if char == ">": break name = name + char if not name: - raise error, "missing group name" + raise error("missing group name") try: index = int(name) if index < 0: - raise error, "negative group number" + raise error("negative group number") except ValueError: if not isname(name): - raise error, "bad character in group name" + raise error("bad character in group name") try: index = pattern.groupindex[name] except KeyError: - raise IndexError, "unknown group name" + raise IndexError("unknown group name") a((MARK, index)) elif c == "0": if s.next in OCTDIGITS: @@ -796,7 +796,7 @@ for index, group in groups: literals[index] = s = g(group) if s is None: - raise error, "unmatched group" + raise error("unmatched group") except IndexError: - raise error, "invalid group reference" + raise 
error("invalid group reference") return sep.join(literals) diff --git a/rpython/rlib/rzipfile.py b/rpython/rlib/rzipfile.py --- a/rpython/rlib/rzipfile.py +++ b/rpython/rlib/rzipfile.py @@ -214,7 +214,7 @@ def _GetContents(self, fp): endrec = _EndRecData(fp) if not endrec: - raise BadZipfile, "File is not a zip file" + raise BadZipfile("File is not a zip file") size_cd = endrec.stuff[5] # bytes in central directory offset_cd = endrec.stuff[6] # offset of central directory self.comment = endrec.comment @@ -227,7 +227,7 @@ centdir = fp.read(46) total = total + 46 if centdir[0:4] != stringCentralDir: - raise BadZipfile, "Bad magic number for central directory" + raise BadZipfile("Bad magic number for central directory") centdir = runpack(structCentralDir, centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information @@ -255,7 +255,7 @@ fp.seek(data.header_offset, 0) fheader = fp.read(30) if fheader[0:4] != stringFileHeader: - raise BadZipfile, "Bad magic number for file header" + raise BadZipfile("Bad magic number for file header") fheader = runpack(structFileHeader, fheader) # file_offset is computed here, since the extra field for # the central directory and for the local file header @@ -266,9 +266,8 @@ + fheader[_FH_EXTRA_FIELD_LENGTH]) fname = fp.read(fheader[_FH_FILENAME_LENGTH]) if fname != data.orig_filename: - raise BadZipfile, \ - 'File name in directory "%s" and header "%s" differ.' % ( - data.orig_filename, fname) + raise BadZipfile('File name in directory "%s" and ' + 'header "%s" differ.' 
% (data.orig_filename, fname)) fp.seek(self.start_dir, 0) def getinfo(self, filename): @@ -296,15 +295,13 @@ finally: rzlib.inflateEnd(stream) elif zinfo.compress_type == ZIP_DEFLATED: - raise BadZipfile, \ - "Cannot decompress file, zlib not installed" + raise BadZipfile("Cannot decompress file, zlib not installed") else: - raise BadZipfile, \ - "Unsupported compression method %d for file %s" % \ - (zinfo.compress_type, filename) + raise BadZipfile("Unsupported compression method %d for " + "file %s" % (zinfo.compress_type, filename)) crc = crc32(bytes) if crc != zinfo.CRC: - raise BadZipfile, "Bad CRC-32 for file %s" % filename + raise BadZipfile("Bad CRC-32 for file %s" % filename) return bytes finally: fp.close() diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -95,7 +95,7 @@ elif whence == 2: offset += len(self.buf) else: - raise ValueError, "whence should be 0, 1 or 2" + raise ValueError("whence should be 0, 1 or 2") if offset < 0: offset = 0 self.pos = offset diff --git a/rpython/rlib/unicodedata/unicodedb_5_2_0.py b/rpython/rlib/unicodedata/unicodedb_5_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_5_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_5_2_0.py @@ -39,7 +39,7 @@ charnode = left else: charnode = right - raise KeyError, name + raise KeyError(name) def name_of_node(charnode): res = [] @@ -112664,7 +112664,7 @@ if code == 917505: res = 9201 if 917536 <= code <= 917631: res = _charnames_917536[code-917536] if 917760 <= code <= 917999: res = _charnames_917760[code-917760] - if res == -1: raise KeyError, code + if res == -1: raise KeyError(code) return name_of_node(res) # the following dictionary is used by modules that take this as a base From noreply at buildbot.pypy.org Sun May 4 03:33:00 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 May 2014 03:33:00 +0200 (CEST) Subject: [pypy-commit] pypy default: modernise 
raise syntax Message-ID: <20140504013300.DFF4E1D236E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71252:5385c2f50d5c Date: 2014-05-04 02:32 +0100 http://bitbucket.org/pypy/pypy/changeset/5385c2f50d5c/ Log: modernise raise syntax diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -58,7 +58,7 @@ try: holders = arguments.match_signature(signature, defs_h) except ArgErr, e: - raise TyperError, "signature mismatch: %s" % e.getmsg(graph.name) + raise TyperError("signature mismatch: %s" % e.getmsg(graph.name)) assert len(holders) == len(rinputs), "argument parsing mismatch" vlist = [] diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -257,7 +257,7 @@ @classmethod def _malloc(cls, n=None): if not isinstance(n, int): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") biggercls = get_ctypes_array_of_size(A, n) bigarray = allocate_ctypes(biggercls) if hasattr(bigarray, 'length'): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,7 +191,7 @@ _adtmeths = {} def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) def _install_extras(self, adtmeths={}, hints={}): self._adtmeths = frozendict(adtmeths) @@ -253,7 +253,7 @@ self._arrayfld = None for name, typ in fields: if name.startswith('_'): - raise NameError, ("%s: field name %r should not start with " + raise NameError("%s: field name %r should not start with " "an underscore" % (self._name, name,)) names.append(name) if name in flds: @@ -311,8 +311,8 @@ def _nofield(self, name): - raise 
AttributeError, 'struct %s has no field %r' % (self._name, - name) + raise AttributeError('struct %s has no field %r' % (self._name, + name)) def _names_without_voids(self): names_without_voids = [name for name in self._names if self._flds[name] is not Void] @@ -545,7 +545,7 @@ self.ARGS = tuple(args) assert isinstance(result, LowLevelType) if isinstance(result, ContainerType): - raise TypeError, "function result can only be primitive or pointer" + raise TypeError("function result can only be primitive or pointer") self.RESULT = result self.ABI = abi @@ -602,7 +602,7 @@ return "%s (gcopaque)" % self.tag def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) class ForwardReference(ContainerType): @@ -714,7 +714,7 @@ _cache = WeakValueDictionary() # cache the Ptrs def __new__(cls, TO, use_cache=True): if not isinstance(TO, ContainerType): - raise TypeError, ("can only point to a Container type, " + raise TypeError("can only point to a Container type, " "not to %s" % (TO,)) if not use_cache: obj = LowLevelType.__new__(cls) @@ -835,7 +835,7 @@ def cast_primitive(TGT, value): ORIG = typeOf(value) if not isinstance(TGT, Primitive) or not isinstance(ORIG, Primitive): - raise TypeError, "can only primitive to primitive" + raise TypeError("can only primitive to primitive") if ORIG == TGT: return value if ORIG == Char or ORIG == UniChar: @@ -855,7 +855,7 @@ return float(value) if ORIG == LongFloat and TGT == Float: return float(value) - raise TypeError, "unsupported cast" + raise TypeError("unsupported cast") def _cast_whatever(TGT, value): from rpython.rtyper.lltypesystem import llmemory, rffi @@ -932,13 +932,13 @@ def cast_pointer(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise TypeError("can only cast pointers to other pointers") return 
ptr._cast_to(PTRTYPE) def cast_opaque_ptr(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise TypeError("can only cast pointers to other pointers") if CURTYPE == PTRTYPE: return ptr if CURTYPE.TO._gckind != PTRTYPE.TO._gckind: @@ -989,9 +989,9 @@ """ CURTYPE = typeOf(structptr).TO if not isinstance(CURTYPE, Struct): - raise TypeError, "direct_fieldptr: not a struct" + raise TypeError("direct_fieldptr: not a struct") if fieldname not in CURTYPE._flds: - raise TypeError, "%s has no field %r" % (CURTYPE, fieldname) + raise TypeError("%s has no field %r" % (CURTYPE, fieldname)) if not structptr: raise RuntimeError("direct_fieldptr: NULL argument") return _subarray._makeptr(structptr._obj, fieldname, structptr._solid) @@ -1004,7 +1004,7 @@ """ CURTYPE = typeOf(arrayptr).TO if not isinstance(CURTYPE, (Array, FixedSizeArray)): - raise TypeError, "direct_arrayitems: not an array" + raise TypeError("direct_arrayitems: not an array") if not arrayptr: raise RuntimeError("direct_arrayitems: NULL argument") return _subarray._makeptr(arrayptr._obj, 0, arrayptr._solid) @@ -1247,7 +1247,7 @@ from rpython.rtyper.lltypesystem import rffi if isinstance(self._T, FuncType): if len(args) != len(self._T.ARGS): - raise TypeError,"calling %r with wrong argument number: %r" % (self._T, args) + raise TypeError("calling %r with wrong argument number: %r" % (self._T, args)) for i, a, ARG in zip(range(len(self._T.ARGS)), args, self._T.ARGS): if typeOf(a) != ARG: # ARG could be Void @@ -1272,11 +1272,11 @@ pass else: args_repr = [typeOf(arg) for arg in args] - raise TypeError, ("calling %r with wrong argument " + raise TypeError("calling %r with wrong argument " "types: %r" % (self._T, args_repr)) callb = self._obj._callable if callb is None: - raise RuntimeError,"calling undefined function" + raise RuntimeError("calling undefined function") return callb(*args) raise 
TypeError("%r instance is not a function" % (self._T,)) @@ -1421,7 +1421,7 @@ self._set_offsets(_offsets) def __nonzero__(self): - raise RuntimeError, "do not test an interior pointer for nullity" + raise RuntimeError("do not test an interior pointer for nullity") def _get_obj(self): ob = self._parent @@ -1657,9 +1657,9 @@ def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None): if not is_valid_int(n): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") if n < 0: - raise ValueError, "negative array length" + raise ValueError("negative array length") _parentable.__init__(self, TYPE) myrange = self._check_range(n) self.items = [TYPE.OF._allocate(initialization=initialization, @@ -1977,9 +1977,9 @@ assert n is None o = _opaque(T, initialization=initialization) else: - raise TypeError, "malloc: unmallocable type" + raise TypeError("malloc: unmallocable type") if flavor == 'gc' and T._gckind != 'gc' and not immortal: - raise TypeError, "gc flavor malloc of a non-GC non-immortal structure" + raise TypeError("gc flavor malloc of a non-GC non-immortal structure") if flavor == "raw" and not immortal and track_allocation: leakfinder.remember_malloc(o, framedepth=2) solid = immortal or flavor == 'raw' @@ -1987,10 +1987,10 @@ def free(p, flavor, track_allocation=True): if flavor.startswith('gc'): - raise TypeError, "gc flavor free" + raise TypeError("gc flavor free") T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + raise TypeError("free(): only for pointers to non-gc containers") if track_allocation: leakfinder.remember_free(p._obj0) p._obj0._free() @@ -1998,7 +1998,7 @@ def render_immortal(p, track_allocation=True): T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + raise TypeError("free(): only for pointers to non-gc containers") if 
track_allocation: leakfinder.remember_free(p._obj0) @@ -2033,7 +2033,7 @@ def functionptr(TYPE, name, **attrs): if not isinstance(TYPE, FuncType): - raise TypeError, "functionptr() for FuncTypes only" + raise TypeError("functionptr() for FuncTypes only") try: hash(tuple(attrs.items())) except TypeError: @@ -2046,7 +2046,7 @@ def opaqueptr(TYPE, name, **attrs): if not isinstance(TYPE, OpaqueType): - raise TypeError, "opaqueptr() for OpaqueTypes only" + raise TypeError("opaqueptr() for OpaqueTypes only") o = _opaque(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o, solid=True) @@ -2064,23 +2064,23 @@ def attachRuntimeTypeInfo(GCSTRUCT, funcptr=None, destrptr=None, customtraceptr=None): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) GCSTRUCT._attach_runtime_type_info_funcptr(funcptr, destrptr, customtraceptr) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def getRuntimeTypeInfo(GCSTRUCT): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) if GCSTRUCT._runtime_type_info is None: - raise ValueError, ("no attached runtime type info for GcStruct %s" % + raise ValueError("no attached runtime type info for GcStruct %s" % GCSTRUCT._name) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def runtime_type_info(p): T = typeOf(p) if not isinstance(T, Ptr) or not isinstance(T.TO, RttiStruct): - raise TypeError, "runtime_type_info on non-RttiStruct pointer: %s" % p + raise TypeError("runtime_type_info on non-RttiStruct pointer: %s" % p) struct = p._obj top_parent = top_container(struct) result = getRuntimeTypeInfo(top_parent._TYPE) @@ -2090,7 +2090,7 @@ T = typeOf(query_funcptr).TO.ARGS[0] result2 = query_funcptr(cast_pointer(T, p)) if result != result2: - raise RuntimeError, ("runtime type-info function for %s:\n" + raise 
RuntimeError("runtime type-info function for %s:\n" " returned: %s,\n" "should have been: %s" % (p, result2, result)) return result diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -379,7 +379,7 @@ def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr if not (hop.args_r[1] == rstr.char_repr and hop.args_r[2] == rstr.char_repr): - raise TyperError, 'replace only works for char args' + raise TyperError('replace only works for char args') v_str, v_c1, v_c2 = hop.inputargs(rstr.repr, rstr.char_repr, rstr.char_repr) hop.exception_cannot_occur() return hop.gendirectcall(self.ll.ll_replace_chr_chr, v_str, v_c1, v_c2) From noreply at buildbot.pypy.org Sun May 4 04:27:20 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 May 2014 04:27:20 +0200 (CEST) Subject: [pypy-commit] pypy default: modernise raise syntax Message-ID: <20140504022720.B68CC1C088E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71253:6045f686a110 Date: 2014-05-04 03:26 +0100 http://bitbucket.org/pypy/pypy/changeset/6045f686a110/ Log: modernise raise syntax diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -22,12 +22,12 @@ try: mod = __import__(module, {}, {}, ['__doc__']) except ImportError, e: - raise Exception, "Import error loading %s: %s" % (dottedname, e) + raise Exception("Import error loading %s: %s" % (dottedname, e)) try: func = getattr(mod, name) except AttributeError: - raise Exception, "Function %s not found in module" % dottedname + raise Exception("Function %s not found in module" % dottedname) return func diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -178,7 +178,7 @@ else: return self.db.get(value) else: - raise TypeError, "expr(%r)" % (v,) + 
raise TypeError("expr(%r)" % (v,)) # ____________________________________________________________ diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -934,7 +934,7 @@ elif hasattr(fnobj._callable, "c_name"): return [] else: - raise ValueError, "don't know how to generate code for %r" % (fnobj,) + raise ValueError("don't know how to generate code for %r" % (fnobj,)) class ExtType_OpaqueNode(ContainerNode): nodekind = 'rpyopaque' diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -627,7 +627,7 @@ elif output.startswith('T'): return output[1:] else: - raise ValueError, 'probing for env var returned %r' % (output,) + raise ValueError('probing for env var returned %r' % (output,)) def test_dictlike_environ_getitem(): def fn(s): diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -234,9 +234,9 @@ if os.WIFEXITED(status): status = os.WEXITSTATUS(status) if status != 0: - raise Exception, "instrumentation child failed: %d" % status + raise Exception("instrumentation child failed: %d" % status) else: - raise Exception, "instrumentation child aborted" + raise Exception("instrumentation child aborted") import array, struct n = datafile.size()//struct.calcsize('L') datafile = datafile.open('rb') diff --git a/rpython/translator/gensupp.py b/rpython/translator/gensupp.py --- a/rpython/translator/gensupp.py +++ b/rpython/translator/gensupp.py @@ -39,7 +39,7 @@ before generating any new names.""" for name in txt.split(): if name in self.seennames: - raise NameError, "%s has already been seen!" 
+ raise NameError("%s has already been seen!") self.seennames[name] = 1 def _ensure_unique(self, basename): diff --git a/rpython/translator/goal/bpnn.py b/rpython/translator/goal/bpnn.py --- a/rpython/translator/goal/bpnn.py +++ b/rpython/translator/goal/bpnn.py @@ -74,7 +74,7 @@ def update(self, inputs): if len(inputs) != self.ni-1: - raise ValueError, 'wrong number of inputs' + raise ValueError('wrong number of inputs') # input activations for i in range(self.ni-1): @@ -100,7 +100,7 @@ def backPropagate(self, targets, N, M): if len(targets) != self.no: - raise ValueError, 'wrong number of target values' + raise ValueError('wrong number of target values') # calculate error terms for output output_deltas = [0.0] * self.no diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -20,8 +20,8 @@ try: subprocess.check_output([cc, '--version']) except: - raise ValueError,"Could not find compiler specified by cc option" + \ - " '%s', it must be a valid exe file on your path"%cc + raise ValueError("Could not find compiler specified by cc option '%s'," + " it must be a valid exe file on your path" % cc) return MingwPlatform(cc) def Windows(cc=None): @@ -31,7 +31,7 @@ raise Exception("Win64 is not supported. 
You must either build for Win32" " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) - + def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -94,7 +94,7 @@ name = "msvc" so_ext = 'dll' exe_ext = 'exe' - + relevant_environ = ('PATH', 'INCLUDE', 'LIB') cc = 'cl.exe' @@ -105,7 +105,7 @@ standalone_only = () shared_only = () environ = None - + def __init__(self, cc=None, x64=False): self.x64 = x64 msvc_compiler_environ = find_msvc_env(x64) @@ -134,7 +134,7 @@ else: masm32 = 'ml.exe' masm64 = 'ml64.exe' - + if x64: self.masm = masm64 else: @@ -338,10 +338,10 @@ definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) - rules.append(('stdafx.pch', 'stdafx.h', + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '$(CREATE_PCH) $(INCLUDEDIRS)')) - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files @@ -361,7 +361,7 @@ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) @@ -371,7 +371,7 @@ for rule in rules: m.rule(*rule) - + if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -116,7 +116,7 @@ print >>f, " ",op print >>f, '--end--' return - raise TypeError, "don't know about %r" % x + raise TypeError("don't know about %r" % x) def view(self): From noreply at buildbot.pypy.org Sun May 4 07:10:09 2014 From: noreply at buildbot.pypy.org 
(bdkearns) Date: Sun, 4 May 2014 07:10:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix test_bytesio Message-ID: <20140504051009.153141D236E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71254:27d20a9db698 Date: 2014-05-04 01:09 -0400 http://bitbucket.org/pypy/pypy/changeset/27d20a9db698/ Log: fix test_bytesio diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -44,7 +44,7 @@ assert f.write(b"") == 0 assert f.write(b"hello") == 5 exc = raises(TypeError, f.write, u"lo") - assert str(exc.value) == "'str' does not have the buffer interface" + assert str(exc.value) == "'str' does not support the buffer interface" import gc; gc.collect() assert f.getvalue() == b"hello" f.close() From noreply at buildbot.pypy.org Sun May 4 07:17:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 07:17:42 +0200 (CEST) Subject: [pypy-commit] pypy py3k: marshal.loads use buffers correctly Message-ID: <20140504051742.1C6761D2371@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71255:9f5a42a7419d Date: 2014-05-04 01:17 -0400 http://bitbucket.org/pypy/pypy/changeset/9f5a42a7419d/ Log: marshal.loads use buffers correctly diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -466,9 +466,9 @@ # Unmarshaller with inlined buffer string def __init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - self.bufstr = space.getarg_w('s#', w_str) + self.buf = space.getarg_w('y*', w_str) self.bufpos = 0 - self.limit = len(self.bufstr) + self.limit = self.buf.getlength() def raise_eof(self): space = self.space @@ -481,14 +481,14 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - return self.bufstr[pos : newpos] + return self.buf.getslice(pos, 
newpos, 1, newpos - pos) def get1(self): pos = self.bufpos if pos >= self.limit: self.raise_eof() self.bufpos = pos + 1 - return self.bufstr[pos] + return self.buf.getitem(pos) def get_int(self): pos = self.bufpos @@ -496,10 +496,10 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - a = ord(self.bufstr[pos]) - b = ord(self.bufstr[pos+1]) - c = ord(self.bufstr[pos+2]) - d = ord(self.bufstr[pos+3]) + a = ord(self.buf.getitem(pos)) + b = ord(self.buf.getitem(pos+1)) + c = ord(self.buf.getitem(pos+2)) + d = ord(self.buf.getitem(pos+3)) if d & 0x80: d -= 0x100 x = a | (b<<8) | (c<<16) | (d<<24) @@ -511,10 +511,10 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - a = ord(self.bufstr[pos]) - b = ord(self.bufstr[pos+1]) - c = ord(self.bufstr[pos+2]) - d = ord(self.bufstr[pos+3]) + a = ord(self.buf.getitem(pos)) + b = ord(self.buf.getitem(pos+1)) + c = ord(self.buf.getitem(pos+2)) + d = ord(self.buf.getitem(pos+3)) x = a | (b<<8) | (c<<16) | (d<<24) if x >= 0: return x From noreply at buildbot.pypy.org Sun May 4 08:47:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 08:47:41 +0200 (CEST) Subject: [pypy-commit] pypy default: replace some usages of bufferstr Message-ID: <20140504064741.E59D01D2371@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71256:ab7d9bdd72c7 Date: 2014-05-04 02:34 -0400 http://bitbucket.org/pypy/pypy/changeset/ab7d9bdd72c7/ Log: replace some usages of bufferstr diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -40,10 +40,10 @@ self.check_valid() return self.space.wrap(self.mmap.read(num)) - @unwrap_spec(tofind='bufferstr') - def find(self, tofind, w_start=None, w_end=None): + def find(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -54,10 
+54,10 @@ end = space.getindex_w(w_end, None) return space.wrap(self.mmap.find(tofind, start, end)) - @unwrap_spec(tofind='bufferstr') - def rfind(self, tofind, w_start=None, w_end=None): + def rfind(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -87,9 +87,9 @@ except OSError, e: raise mmap_error(self.space, e) - @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, w_data): self.check_valid() + data = self.space.getarg_w('s#', w_data) self.check_writeable() try: self.mmap.write(data) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -142,12 +142,13 @@ else: return space.wrap(s) - at unwrap_spec(fd=c_int, data='bufferstr') -def write(space, fd, data): + at unwrap_spec(fd=c_int) +def write(space, fd, w_data): """Write a string to a file descriptor. 
Return the number of bytes actually written, which may be smaller than len(data).""" + data = space.getarg_w('s*', w_data) try: - res = os.write(fd, data) + res = os.write(fd, data.as_str()) except OSError, e: raise wrap_oserror(space, e) else: From noreply at buildbot.pypy.org Sun May 4 08:47:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 08:47:43 +0200 (CEST) Subject: [pypy-commit] pypy default: properly use buffers in struct.unpack_from Message-ID: <20140504064743.3CB541D2371@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71257:99632e367e42 Date: 2014-05-04 02:46 -0400 http://bitbucket.org/pypy/pypy/changeset/99632e367e42/ Log: properly use buffers in struct.unpack_from diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -8,7 +8,6 @@ class PackFormatIterator(FormatIterator): - def __init__(self, space, args_w, size): self.space = space self.args_w = args_w @@ -105,11 +104,11 @@ class UnpackFormatIterator(FormatIterator): - - def __init__(self, space, input): + def __init__(self, space, buf): self.space = space - self.input = input - self.inputpos = 0 + self.buf = buf + self.length = buf.getlength() + self.pos = 0 self.result_w = [] # list of wrapped objects # See above comment on operate. 
@@ -124,18 +123,18 @@ _operate_is_specialized_ = True def align(self, mask): - self.inputpos = (self.inputpos + mask) & ~mask + self.pos = (self.pos + mask) & ~mask def finished(self): - if self.inputpos != len(self.input): + if self.pos != self.length: raise StructError("unpack str size too long for format") def read(self, count): - end = self.inputpos + count - if end > len(self.input): + end = self.pos + count + if end > self.length: raise StructError("unpack str size too short for format") - s = self.input[self.inputpos : end] - self.inputpos = end + s = self.buf.getslice(self.pos, end, 1, end - self.pos) + self.pos = end return s @specialize.argtype(1) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,4 +1,5 @@ from rpython.rlib import jit +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator from rpython.tool.sourcetools import func_with_new_name @@ -65,9 +66,8 @@ buf.setslice(offset, res) - at unwrap_spec(format=str, input='bufferstr') -def unpack(space, format, input): - fmtiter = UnpackFormatIterator(space, input) +def _unpack(space, format, buf): + fmtiter = UnpackFormatIterator(space, buf) try: fmtiter.interpret(format) except StructOverflowError, e: @@ -79,11 +79,16 @@ return space.newtuple(fmtiter.result_w[:]) -# XXX inefficient + at unwrap_spec(format=str) +def unpack(space, format, w_str): + buf = space.getarg_w('s*', w_str) + return _unpack(space, format, buf) + + @unwrap_spec(format=str, offset=int) -def unpack_from(space, format, w_buf, offset=0): +def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) - buf = space.getarg_w('z*', w_buf) + buf = space.getarg_w('z*', w_buffer) if buf is None: w_module = space.getbuiltinmodule('struct') w_error = 
space.getattr(w_module, space.wrap('error')) @@ -96,8 +101,8 @@ raise oefmt(w_error, "unpack_from requires a buffer of at least %d bytes", size) - data = buf.getslice(offset, offset + size, 1, size) - return unpack(space, format, data) + buf = SubBuffer(buf, offset, size) + return _unpack(space, format, buf) class W_Struct(W_Root): From noreply at buildbot.pypy.org Sun May 4 16:03:00 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 4 May 2014 16:03:00 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: change my name, add acks Message-ID: <20140504140300.7C2DC1D293D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5226:77a8df646a80 Date: 2014-05-04 16:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/77a8df646a80/ Log: change my name, add acks diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -40,7 +40,7 @@ \title{The Way Forward in Parallelizing Dynamic Languages} \subtitle{Position Paper, ICOOOLPS'14} -\authorinfo{Remi Meier} +\authorinfo{Remigius Meier} {Department of Computer Science\\ ETH Zürich} {remi.meier at inf.ethz.ch} \authorinfo{Armin Rigo} @@ -428,9 +428,9 @@ %% This is the text of the appendix, if you need one. -%% \acks - -%% Acknowledgements... +\acks +We would like to thank Maciej Fijalkowski and Carl Friedrich Bolz for +their valuable inputs and the many fruitful discussions. % We recommend abbrvnat bibliography style. 
From noreply at buildbot.pypy.org Sun May 4 16:50:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 16:50:06 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Failing test Message-ID: <20140504145006.EE4AF1D23D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1195:677236438589 Date: 2014-05-04 16:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/677236438589/ Log: Failing test diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -208,14 +208,16 @@ def test_double_abort_markers_cb_inevitable(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d\x00' % (number,) + s = '%d %r\x00' % (number, stm_get_char(p)) assert len(s) <= outbufsize outbuf[0:len(s)] = s lib.stmcb_expand_marker = expand_marker # self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'A') self.push_root(ffi.cast("object_t *", 19)) - self.push_root(ffi.cast("object_t *", ffi.NULL)) + self.push_root(ffi.cast("object_t *", p)) self.become_inevitable() self.pop_root() self.pop_root() @@ -225,14 +227,16 @@ # self.switch(1) self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'B') self.push_root(ffi.cast("object_t *", 21)) - self.push_root(ffi.cast("object_t *", ffi.NULL)) + self.push_root(ffi.cast("object_t *", p)) py.test.raises(Conflict, self.become_inevitable) # tl = self.get_stm_thread_local() assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE - assert ffi.string(tl.longest_marker_self) == '21' - assert ffi.string(tl.longest_marker_other) == '19' + assert ffi.string(tl.longest_marker_self) == "21 'B'" + assert ffi.string(tl.longest_marker_other) == "19 'A'" def test_read_write_contention(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") From noreply at buildbot.pypy.org Sun May 4 16:50:07 2014 From: noreply at buildbot.pypy.org 
(arigo) Date: Sun, 4 May 2014 16:50:07 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Fix the test Message-ID: <20140504145007.EC0521D23D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1196:dadefd7d75ad Date: 2014-05-04 16:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/dadefd7d75ad/ Log: Fix the test diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py --- a/c7/test/test_marker.py +++ b/c7/test/test_marker.py @@ -208,7 +208,8 @@ def test_double_abort_markers_cb_inevitable(self): @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") def expand_marker(base, number, ptr, outbuf, outbufsize): - s = '%d %r\x00' % (number, stm_get_char(p)) + c = (base + int(ffi.cast("uintptr_t", ptr)))[8] + s = '%d %r\x00' % (number, c) assert len(s) <= outbufsize outbuf[0:len(s)] = s lib.stmcb_expand_marker = expand_marker From noreply at buildbot.pypy.org Sun May 4 16:50:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 16:50:08 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Fix the test Message-ID: <20140504145008.E3B4B1D23D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1197:a62dc8720373 Date: 2014-05-04 16:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/a62dc8720373/ Log: Fix the test diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -244,6 +244,12 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + uintptr_t *pmarker_inev_obj = (uintptr_t *) + REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->marker_inev[1]); + minor_trace_if_young((object_t **)pmarker_inev_obj); + } } static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) From noreply at buildbot.pypy.org Sun May 4 16:50:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 16:50:09 +0200 (CEST) Subject: 
[pypy-commit] stmgc marker: Untabbify Message-ID: <20140504145009.D3CE11D23D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1198:822e52f17647 Date: 2014-05-04 16:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/822e52f17647/ Log: Untabbify diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -170,19 +170,19 @@ switch (kind) { case WRITE_WRITE_CONTENTION: marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + marker_expand(other_marker, other_segment_base, outmarker); break; case INEVITABLE_CONTENTION: assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); + marker_expand(other_marker, other_segment_base, outmarker); break; case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); - break; + strcpy(outmarker, ""); + break; default: - outmarker[0] = 0; + outmarker[0] = 0; break; } From noreply at buildbot.pypy.org Sun May 4 16:52:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 16:52:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/822e52f17647 (branch "marker") Message-ID: <20140504145200.F02251D23D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71258:8e81e8026bd6 Date: 2014-05-04 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/8e81e8026bd6/ Log: import stmgc/822e52f17647 (branch "marker") diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -937201ff1335 +822e52f17647 diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -171,19 
+171,19 @@ switch (kind) { case WRITE_WRITE_CONTENTION: marker_fetch_obj_write(other_segment_num, obj, other_marker); - marker_expand(other_marker, other_segment_base, outmarker); + marker_expand(other_marker, other_segment_base, outmarker); break; case INEVITABLE_CONTENTION: assert(abort_other == false); other_marker[0] = other_pseg->marker_inev[0]; other_marker[1] = other_pseg->marker_inev[1]; - marker_expand(other_marker, other_segment_base, outmarker); + marker_expand(other_marker, other_segment_base, outmarker); break; case WRITE_READ_CONTENTION: - strcpy(outmarker, ""); - break; + strcpy(outmarker, ""); + break; default: - outmarker[0] = 0; + outmarker[0] = 0; break; } diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -245,6 +245,12 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + uintptr_t *pmarker_inev_obj = (uintptr_t *) + REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->marker_inev[1]); + minor_trace_if_young((object_t **)pmarker_inev_obj); + } } static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) From noreply at buildbot.pypy.org Sun May 4 18:19:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 18:19:36 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Change the "..." marker into "<" or ">", which are more distinctive in filenames Message-ID: <20140504161936.5418B1C02D9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71259:5f88689ad670 Date: 2014-05-04 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/5f88689ad670/ Log: Change the "..." 
marker into "<" or ">", which are more distinctive in filenames diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -61,9 +61,9 @@ if match and match.group(1) != '?': filename = match.group(1) lineno = int(match.group(2)) - if filename.startswith('...'): + if filename.startswith('<') and not filename.endswith('>'): if filename not in _fullfilenames: - partial = filename[3:] + partial = filename[1:] found = set() for module in sys.modules.values(): try: diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -98,7 +98,7 @@ name = _RPyString_AsString(co_name); if (nlen > remaining / 2) { nlen = remaining / 2; - ntrunc = "..."; + ntrunc = "<"; } remaining -= nlen; @@ -107,7 +107,7 @@ if (fnlen > remaining) { fn += (fnlen - remaining); fnlen = remaining; - fntrunc = "..."; + fntrunc = ">"; } long lnotablen = RPyString_Size(co_lnotab); From noreply at buildbot.pypy.org Sun May 4 18:19:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 18:19:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Do push_marker/pop_marker inside the jit_driver loop inside of outside. Previously, during Message-ID: <20140504161937.876821C02D9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71260:1d6e499b69ca Date: 2014-05-04 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/1d6e499b69ca/ Log: Do push_marker/pop_marker inside the jit_driver loop inside of outside. Previously, during blackhole-interpretation, we would see update_marker_num() calls with no corresponding pushed marker, which leads to crashes. 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -210,15 +210,12 @@ if next_instr != 0: self.pushvalue(w_inputvalue) # - rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode) try: w_exitvalue = self.dispatch(self.pycode, next_instr, executioncontext) except Exception: - rstm.pop_marker() executioncontext.return_trace(self, self.space.w_None) raise - rstm.pop_marker() executioncontext.return_trace(self, w_exitvalue) # it used to say self.last_exception = None # this is now done by the code in pypyjit module diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -65,18 +65,20 @@ # For the sequel, force 'next_instr' to be unsigned for performance next_instr = r_uint(next_instr) co_code = pycode.co_code - try: - while True: - if self.space.config.translation.stm: - # only used for no-jit. The jit-jitdriver is - # in interp_jit.py - stmonly_jitdriver.jit_merge_point( - self=self, co_code=co_code, - next_instr=next_instr, ec=ec) + while True: + if self.space.config.translation.stm: + # only used for no-jit. 
The jit-jitdriver is + # in interp_jit.py + stmonly_jitdriver.jit_merge_point( + self=self, co_code=co_code, + next_instr=next_instr, ec=ec) + rstm.push_marker(intmask(next_instr) * 2 + 1, co_code) + try: next_instr = self.handle_bytecode(co_code, next_instr, ec) - rstm.update_marker_num(intmask(next_instr) * 2 + 1) - except ExitFrame: - return self.popvalue() + except ExitFrame: + return self.popvalue() + finally: + rstm.pop_marker() def handle_bytecode(self, co_code, next_instr, ec): try: diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -52,28 +52,29 @@ def dispatch(self, pycode, next_instr, ec): self = hint(self, access_directly=True) next_instr = r_uint(next_instr) - is_being_profiled = self.is_being_profiled - try: - while True: - pypyjitdriver.jit_merge_point(ec=ec, - frame=self, next_instr=next_instr, pycode=pycode, - is_being_profiled=is_being_profiled) - # nothing inbetween! - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() - co_code = pycode.co_code - self.valuestackdepth = hint(self.valuestackdepth, promote=True) + while True: + pypyjitdriver.jit_merge_point(ec=ec, + frame=self, next_instr=next_instr, pycode=pycode, + is_being_profiled=self.is_being_profiled) + # nothing inbetween! 
+ if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() + + co_code = pycode.co_code + self.valuestackdepth = hint(self.valuestackdepth, promote=True) + rstm.push_marker(intmask(next_instr) * 2 + 1, co_code) + try: next_instr = self.handle_bytecode(co_code, next_instr, ec) - rstm.update_marker_num(intmask(next_instr) * 2 + 1) - is_being_profiled = self.is_being_profiled - except Yield: - self.last_exception = None - w_result = self.popvalue() - jit.hint(self, force_virtualizable=True) - return w_result - except ExitFrame: - self.last_exception = None - return self.popvalue() + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result + except ExitFrame: + self.last_exception = None + return self.popvalue() + finally: + rstm.pop_marker() def jump_absolute(self, jumpto, ec): if we_are_jitted(): From noreply at buildbot.pypy.org Sun May 4 18:41:30 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 May 2014 18:41:30 +0200 (CEST) Subject: [pypy-commit] pypy default: kill unused opcode_method_names Message-ID: <20140504164130.767DB1C02D9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71261:6681c9f6226c Date: 2014-05-04 17:40 +0100 http://bitbucket.org/pypy/pypy/changeset/6681c9f6226c/ Log: kill unused opcode_method_names diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -7,7 +7,6 @@ import __builtin__ from rpython.tool.error import source_lines -from rpython.tool.stdlib_opcode import host_bytecode_spec from rpython.rlib import rstackovf from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, @@ -305,8 +304,6 @@ ] class FlowContext(object): - opcode_method_names = host_bytecode_spec.method_names - def __init__(self, graph, code): self.graph = graph func = 
graph.func From noreply at buildbot.pypy.org Sun May 4 20:25:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 20:25:00 +0200 (CEST) Subject: [pypy-commit] pypy default: remove loop from StringBuffer.getslice Message-ID: <20140504182501.0123B1C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71262:5cf4ae8fccac Date: 2014-05-04 12:48 -0400 http://bitbucket.org/pypy/pypy/changeset/5cf4ae8fccac/ Log: remove loop from StringBuffer.getslice diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -61,7 +61,7 @@ if step == 1: assert 0 <= start <= stop return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) + return Buffer.getslice(self, start, stop, step, size) class SubBuffer(Buffer): diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -6,4 +6,5 @@ assert buf.getitem(4) == 'o' assert buf.getlength() == 11 assert buf.getslice(1, 6, 1, 5) == 'ello ' + assert buf.getslice(1, 6, 2, 3) == 'el ' assert buf.as_str() == 'hello world' From noreply at buildbot.pypy.org Sun May 4 20:25:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 20:25:02 +0200 (CEST) Subject: [pypy-commit] pypy default: no need to calculate this Message-ID: <20140504182502.3AC271C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71263:04ddb47ecbe6 Date: 2014-05-04 13:32 -0400 http://bitbucket.org/pypy/pypy/changeset/04ddb47ecbe6/ Log: no need to calculate this diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -133,7 +133,7 @@ end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") - s = self.buf.getslice(self.pos, end, 1, 
end - self.pos) + s = self.buf.getslice(self.pos, end, 1, count) self.pos = end return s From noreply at buildbot.pypy.org Sun May 4 20:54:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 20:54:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140504185416.EC3211D236E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71264:98f4d805e5d2 Date: 2014-05-04 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/98f4d805e5d2/ Log: Fix diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -72,7 +72,7 @@ stmonly_jitdriver.jit_merge_point( self=self, co_code=co_code, next_instr=next_instr, ec=ec) - rstm.push_marker(intmask(next_instr) * 2 + 1, co_code) + rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode) try: next_instr = self.handle_bytecode(co_code, next_instr, ec) except ExitFrame: diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -62,7 +62,7 @@ co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) - rstm.push_marker(intmask(next_instr) * 2 + 1, co_code) + rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode) try: next_instr = self.handle_bytecode(co_code, next_instr, ec) except Yield: From noreply at buildbot.pypy.org Sun May 4 20:55:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 20:55:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Bah Message-ID: <20140504185542.2E79C1D2371@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71265:6180df23e44d Date: 2014-05-04 20:55 +0200 http://bitbucket.org/pypy/pypy/changeset/6180df23e44d/ Log: Bah diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ 
b/rpython/translator/stm/src_stm/extracode.h @@ -98,7 +98,7 @@ name = _RPyString_AsString(co_name); if (nlen > remaining / 2) { nlen = remaining / 2; - ntrunc = "<"; + ntrunc = ">"; } remaining -= nlen; @@ -107,7 +107,7 @@ if (fnlen > remaining) { fn += (fnlen - remaining); fnlen = remaining; - fntrunc = ">"; + fntrunc = "<"; } long lnotablen = RPyString_Size(co_lnotab); From noreply at buildbot.pypy.org Sun May 4 21:01:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 21:01:51 +0200 (CEST) Subject: [pypy-commit] stmgc marker: Close ready-to-be-merged branch Message-ID: <20140504190151.C6B401D2371@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: marker Changeset: r1199:59e10002738c Date: 2014-05-04 20:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/59e10002738c/ Log: Close ready-to-be-merged branch From noreply at buildbot.pypy.org Sun May 4 21:01:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 21:01:53 +0200 (CEST) Subject: [pypy-commit] stmgc default: hg merge marker Message-ID: <20140504190153.48E6A1D2371@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1200:84f5fbe03d5d Date: 2014-05-04 21:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/84f5fbe03d5d/ Log: hg merge marker Adds simple markers, which record the location in the user program, extracted for every entry in modified_old_object. This allows us to record and retrieve the marker corresponding to the longest abort or pause. 
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -46,6 +46,15 @@ void stmcb_commit_soon() {} +static void expand_marker(char *base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize) +{ + assert(following_object == NULL); + snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); +} + + nodeptr_t global_chained_list; @@ -89,6 +98,18 @@ STM_START_TRANSACTION(&stm_thread_local, here); + if (stm_thread_local.longest_marker_state != 0) { + fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", + &stm_thread_local, + stm_thread_local.longest_marker_state, + stm_thread_local.longest_marker_time); + fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", + stm_thread_local.longest_marker_self, + stm_thread_local.longest_marker_other); + stm_thread_local.longest_marker_state = 0; + stm_thread_local.longest_marker_time = 0.0; + } + nodeptr_t prev = initial; stm_read((objptr_t)prev); @@ -199,8 +220,16 @@ STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ + int loops = 0; + while (check_sorted() == -1) { + + STM_PUSH_MARKER(stm_thread_local, 2 * loops + 1, NULL); + bubble_run(); + + STM_POP_MARKER(stm_thread_local); + loops++; } STM_POP_ROOT(stm_thread_local, global_chained_list); @@ -247,6 +276,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + stmcb_expand_marker = expand_marker; setup_list(); diff --git a/c7/doc/marker.txt b/c7/doc/marker.txt new file mode 100644 --- /dev/null +++ b/c7/doc/marker.txt @@ -0,0 +1,42 @@ + +Reports +======= + +- self-abort: + WRITE_WRITE_CONTENTION, INEVITABLE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + marker pointing back to the write, time lost by this thread + +- aborted by a different thread: + WRITE_WRITE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + remote marker pointing back to the write, time 
lost by this thread + (no local marker available to know where we've read the object from) + INEVITABLE_CONTENTION: + n/a + +- self-pausing: + same as self-abort, but reporting the time lost by pausing + +- waiting for a free segment: + - if we're waiting because of inevitability, report with a + marker and the time lost + - if we're just waiting because of no free segment, don't report it, + or maybe with only the total time lost and no marker + +- more internal reasons for cond_wait(), like synchronizing the threads, + should all be resolved quickly and are unlikely worth a report + + +Internal Measurements +===================== + +- use clock_gettime(CLOCK_MONOTONIC), it seems to be the fastest way + (less than 5 times slower than a RDTSC instruction, which is itself + not safe in the presence of threads migrating among CPUs) + +- record only the highest-time entry. The user of the library is + responsible for getting and clearing it often enough if it wants + more details. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -99,7 +99,8 @@ static void contention_management(uint8_t other_segment_num, - enum contention_kind_e kind) + enum contention_kind_e kind, + object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); @@ -161,6 +162,7 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; + marker_contention(kind, false, other_segment_num, obj); change_timing_state(wait_category); @@ -177,7 +179,13 @@ if (must_abort()) abort_with_mutex(); - change_timing_state(STM_TIME_RUN_CURRENT); + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + double elapsed = + change_timing_state_tl(pseg->pub.running_thread, + STM_TIME_RUN_CURRENT); + marker_copy(pseg->pub.running_thread, pseg, + wait_category, elapsed); } else if (!contmgr.abort_other) { @@ -186,6 +194,7 @@ dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; + marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } @@ -193,6 +202,7 @@ /* We have to signal the other thread to abort, and wait until it does. */ contmgr.other_pseg->pub.nursery_end = abort_category; + marker_contention(kind, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -270,7 +280,8 @@ } } -static void write_write_contention_management(uintptr_t lock_idx) +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj) { s_mutex_lock(); @@ -281,7 +292,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION); + contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -290,12 +301,13 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num) +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION); + contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION); + contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,6 +1,8 @@ -static void write_write_contention_management(uintptr_t lock_idx); -static void write_read_contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj); +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -73,9 +73,15 @@ assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { + /* A lock to prevent reading garbage from + lookup_other_thread_recorded_marker() */ + acquire_marker_lock(STM_SEGMENT->segment_base); + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], - 0, lock_num))) + 0, lock_num))) { + release_marker_lock(STM_SEGMENT->segment_base); goto retry; + } dprintf_test(("write_slowpath %p -> mod_old\n", obj)); @@ -83,6 +89,15 @@ Add it to the list 'modified_old_objects'. 
*/ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + /* Add the current marker, recording where we wrote to this object */ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->modified_old_objects_markers = + list_append2(STM_PSEGMENT->modified_old_objects_markers, + marker[0], marker[1]); + + release_marker_lock(STM_SEGMENT->segment_base); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -124,7 +139,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx); + write_write_contention_management(lock_idx, obj); goto retry; } @@ -194,6 +209,11 @@ STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; +#ifndef NDEBUG + STM_PSEGMENT->marker_inev[1] = 99999999999999999L; +#endif + if (jmpbuf == NULL) + marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; @@ -221,12 +241,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); +#ifndef NDEBUG + /* this should not be used when objects_pointing_to_nursery == NULL */ + STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; +#endif check_nursery_at_transaction_start(); } @@ -261,7 +286,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! 
*/ - write_read_contention_management(i); + write_read_contention_management(i, item); /* If we reach this point, we didn't abort, but maybe we had to wait for the other thread to commit. If we @@ -446,6 +471,7 @@ release_privatization_lock(); list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(STM_PSEGMENT->modified_old_objects_markers); } static void _finish_transaction(int attribute_to) @@ -584,6 +610,7 @@ })); list_clear(pseg->modified_old_objects); + list_clear(pseg->modified_old_objects_markers); } static void abort_data_structures_from_segment_num(int segment_num) @@ -608,6 +635,10 @@ (int)pseg->transaction_state); } + /* if we don't have marker information already, look up and preserve + the marker information from the shadowstack as a string */ + marker_default_for_abort(pseg); + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -618,6 +649,7 @@ value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); + pseg->shadowstack_at_abort = tl->shadowstack; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; @@ -689,6 +721,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); + marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -78,9 +78,17 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. */ + that need to be copied to other segments upon commit. 
Note that + every object takes three list items: the object, and two words for + the location marker. */ struct list_s *modified_old_objects; + /* For each entry in 'modified_old_objects', we have two entries + in the following list, which give the marker at the time we added + the entry to modified_old_objects. */ + struct list_s *modified_old_objects_markers; + uintptr_t modified_old_objects_markers_num_old; + /* List of out-of-nursery objects that may contain pointers to nursery objects. This is used to track the GC status: they are all objects outside the nursery on which an stm_write() occurred @@ -157,10 +165,18 @@ many reads / rare writes.) */ uint8_t privatization_lock; + /* This lock is acquired when we mutate 'modified_old_objects' but + we don't have the global mutex. It is also acquired during minor + collection. It protects against a different thread that tries to + get this segment's marker corresponding to some object, or to + expand the marker into a full description. */ + uint8_t marker_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_abort; /* Already signalled to commit soon: */ bool signalled_to_commit_soon; @@ -169,6 +185,11 @@ #ifndef NDEBUG pthread_t running_pthread; #endif + + /* Temporarily stores the marker information */ + char marker_self[_STM_MARKER_LEN]; + char marker_other[_STM_MARKER_LEN]; + uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ }; enum /* safe_point */ { @@ -252,3 +273,17 @@ &STM_PSEGMENT->privatization_lock); spinlock_release(*lock); } + +static inline void acquire_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_acquire(*lock); +} + +static inline void release_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_release(*lock); +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -382,7 +382,7 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - if (((uintptr_t)current->ss) > STM_STACK_MARKER_OLD) + if ((((uintptr_t)current->ss) & 3) == 0) mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); @@ -421,6 +421,23 @@ } } +static void mark_visit_from_markers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + char *base = get_segment_base(j); + struct list_s *lst = get_priv_segment(j)->modified_old_objects_markers; + uintptr_t i; + for (i = list_count(lst); i > 0; i -= 2) { + mark_visit_object((object_t *)list_item(lst, i - 1), base); + } + if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; + mark_visit_object((object_t *)marker_inev_obj, base); + } + } +} + static void 
clean_up_segment_lists(void) { long i; @@ -523,6 +540,7 @@ /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); + mark_visit_from_markers(); mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -33,6 +33,18 @@ #define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) +static inline struct list_s *list_append2(struct list_s *lst, + uintptr_t item0, uintptr_t item1) +{ + uintptr_t index = lst->count; + lst->count += 2; + if (UNLIKELY(index >= lst->last_allocated)) + lst = _list_grow(lst, index + 1); + lst->items[index + 0] = item0; + lst->items[index + 1] = item1; + return lst; +} + static inline void list_clear(struct list_s *lst) { @@ -66,6 +78,11 @@ lst->items[index] = newitem; } +static inline uintptr_t *list_ptr_to_item(struct list_s *lst, uintptr_t index) +{ + return &lst->items[index]; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/stm/marker.c b/c7/stm/marker.c new file mode 100644 --- /dev/null +++ b/c7/stm/marker.c @@ -0,0 +1,198 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + + +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +{ + /* fetch the current marker from the tl's shadow stack, + and return it in 'marker[2]'. */ + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + + /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is + a convenient stopper for the loop below but which shouldn't + be returned. 
*/ + assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + + while (!(((uintptr_t)current->ss) & 1)) { + current--; + assert(current >= base); + } + if (current != base) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + } + else { + /* no marker found */ + marker[0] = 0; + marker[1] = 0; + } +} + +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker) +{ + /* Expand the marker given by 'marker[2]' into a full string. This + works assuming that the marker was produced inside the segment + given by 'segment_base'. If that's from a different thread, you + must first acquire the corresponding 'marker_lock'. */ + assert(_has_mutex()); + outmarker[0] = 0; + if (marker[0] == 0) + return; /* no marker entry found */ + if (stmcb_expand_marker != NULL) { + stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], + outmarker, _STM_MARKER_LEN); + } +} + +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +{ + if (pseg->marker_self[0] != 0) + return; /* already collected an entry */ + + uintptr_t marker[2]; + marker_fetch(pseg->pub.running_thread, marker); + marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); + pseg->marker_other[0] = 0; +} + +char *_stm_expand_marker(void) +{ + /* for tests only! */ + static char _result[_STM_MARKER_LEN]; + uintptr_t marker[2]; + _result[0] = 0; + s_mutex_lock(); + marker_fetch(STM_SEGMENT->running_thread, marker); + marker_expand(marker, STM_SEGMENT->segment_base, _result); + s_mutex_unlock(); + return _result; +} + +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time) +{ + /* Copies the marker information from pseg to tl. This is called + indirectly from abort_with_mutex(), but only if the lost time is + greater than that of the previous recorded marker. 
By contrast, + pseg->marker_self has been filled already in all cases. The + reason for the two steps is that we must fill pseg->marker_self + earlier than now (some objects may be GCed), but we only know + here the total time it gets attributed. + */ + if (stmcb_debug_print) { + stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); + } + if (time * 0.99 > tl->longest_marker_time) { + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); + } + pseg->marker_self[0] = 0; + pseg->marker_other[0] = 0; +} + +static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]) +{ + assert(_has_mutex()); + + /* here, we acquired the other thread's marker_lock, which means that: + + (1) it has finished filling 'modified_old_objects' after it sets + up the write_locks[] value that we're conflicting with + + (2) it is not mutating 'modified_old_objects' right now (we have + the global mutex_lock at this point too). 
+ */ + long i; + struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + return; + } + } + marker[0] = 0; + marker[1] = 0; +} + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj) +{ + uintptr_t self_marker[2]; + uintptr_t other_marker[2]; + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + other_pseg = get_priv_segment(other_segment_num); + + char *my_segment_base = STM_SEGMENT->segment_base; + char *other_segment_base = get_segment_base(other_segment_num); + + acquire_marker_lock(other_segment_base); + + /* Collect the location for myself. It's usually the current + location, except in a write-read abort, in which case it's the + older location of the write. */ + if (kind == WRITE_READ_CONTENTION) + marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + else + marker_fetch(my_pseg->pub.running_thread, self_marker); + + /* Expand this location into either my_pseg->marker_self or + other_pseg->marker_other, depending on who aborts. */ + marker_expand(self_marker, my_segment_base, + abort_other ? other_pseg->marker_other + : my_pseg->marker_self); + + /* For some categories, we can also collect the relevant information + for the other segment. */ + char *outmarker = abort_other ? 
other_pseg->marker_self + : my_pseg->marker_other; + switch (kind) { + case WRITE_WRITE_CONTENTION: + marker_fetch_obj_write(other_segment_num, obj, other_marker); + marker_expand(other_marker, other_segment_base, outmarker); + break; + case INEVITABLE_CONTENTION: + assert(abort_other == false); + other_marker[0] = other_pseg->marker_inev[0]; + other_marker[1] = other_pseg->marker_inev[1]; + marker_expand(other_marker, other_segment_base, outmarker); + break; + case WRITE_READ_CONTENTION: + strcpy(outmarker, ""); + break; + default: + outmarker[0] = 0; + break; + } + + release_marker_lock(other_segment_base); +} + +static void marker_fetch_inev(void) +{ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->marker_inev[0] = marker[0]; + STM_PSEGMENT->marker_inev[1] = marker[1]; +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h new file mode 100644 --- /dev/null +++ b/c7/stm/marker.h @@ -0,0 +1,12 @@ + +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_fetch_inev(void); +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker); +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time); + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -160,28 +160,26 @@ --current; OPT_ASSERT(current >= base); - switch ((uintptr_t)current->ss) { + uintptr_t x = (uintptr_t)current->ss; - case 0: /* NULL */ - continue; - - case STM_STACK_MARKER_NEW: + if ((x & 3) == 0) { + /* the stack entry is a regular pointer (possibly NULL) */ + minor_trace_if_young(¤t->ss); + } + else if (x == STM_STACK_MARKER_NEW) { /* the marker was not already seen: mark it as seen, but continue looking 
more deeply in the shadowstack */ current->ss = (object_t *)STM_STACK_MARKER_OLD; - continue; - - case STM_STACK_MARKER_OLD: + } + else if (x == STM_STACK_MARKER_OLD) { /* the marker was already seen: we can stop the root stack tracing at this point */ - goto interrupt; - - default: - /* the stack entry is a regular pointer */ - minor_trace_if_young(¤t->ss); + break; + } + else { + /* it is an odd-valued marker, ignore */ } } - interrupt: minor_trace_if_young(&tl->thread_local_obj); } @@ -236,6 +234,24 @@ _collect_now(item)); } +static void collect_roots_from_markers(uintptr_t num_old) +{ + /* visit the marker objects */ + struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; + STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); + uintptr_t i, total = list_count(mlst); + assert((total & 1) == 0); + for (i = num_old + 1; i < total; i += 2) { + minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); + } + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + uintptr_t *pmarker_inev_obj = (uintptr_t *) + REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->marker_inev[1]); + minor_trace_if_young((object_t **)pmarker_inev_obj); + } +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ @@ -285,6 +301,8 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); + acquire_marker_lock(STM_SEGMENT->segment_base); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { /* We should commit soon, probably. This is kind of a @@ -306,6 +324,7 @@ /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. 
*/ + uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); @@ -315,7 +334,12 @@ into objects_pointing_to_nursery, but instead we use the following shortcut */ collect_modified_old_objects(); + num_old = 0; } + else + num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + + collect_roots_from_markers(num_old); collect_roots_in_nursery(); @@ -328,6 +352,8 @@ assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); + + release_marker_lock(STM_SEGMENT->segment_base); } static void minor_collection(bool commit) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -78,6 +78,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->modified_old_objects_markers = list_create(); pr->young_weakrefs = list_create(); pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); @@ -115,6 +116,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->modified_old_objects_markers); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -25,18 +25,26 @@ return oldstate; } -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) { TIMING_CHANGE(tl, newstate); + return elasped; } static void timing_end_transaction(enum stm_time_e attribute_to) { stm_thread_local_t *tl = STM_SEGMENT->running_thread; TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; + 
add_timing(tl, attribute_to, time_this_transaction); tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; + + if (attribute_to != STM_TIME_RUN_COMMITTED) { + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + marker_copy(tl, pseg, attribute_to, time_this_transaction); + } } static const char *timer_names[] = { @@ -74,6 +82,10 @@ fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } + fprintf(stderr, " %-24s %6s %11.6f s\n", + "longest recorded marker", "", tl->longest_marker_time); + fprintf(stderr, " \"%.*s\"\n", + (int)_STM_MARKER_LEN, tl->longest_marker_self); s_mutex_unlock(); } } diff --git a/c7/stm/timing.h b/c7/stm/timing.h --- a/c7/stm/timing.h +++ b/c7/stm/timing.h @@ -8,7 +8,7 @@ } static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -14,6 +14,7 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/timing.h" +#include "stm/marker.h" #include "stm/misc.c" #include "stm/list.c" @@ -33,3 +34,4 @@ #include "stm/fprintcolor.c" #include "stm/weakref.c" #include "stm/timing.c" +#include "stm/marker.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -74,6 +74,8 @@ _STM_TIME_N }; +#define _STM_MARKER_LEN 80 + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -91,6 +93,11 @@ float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; + /* the marker with the longest associated time so far */ + enum stm_time_e longest_marker_state; + double longest_marker_time; + char 
longest_marker_self[_STM_MARKER_LEN]; + char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -269,8 +276,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW 1 -#define STM_STACK_MARKER_OLD 2 +#define STM_STACK_MARKER_NEW (-41) +#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t @@ -373,6 +380,43 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* The markers pushed in the shadowstack are an odd number followed by a + regular pointer. When needed, this library invokes this callback to + turn this pair into a human-readable explanation. */ +extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); +extern void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + +/* Conventience macros to push the markers into the shadowstack */ +#define STM_PUSH_MARKER(tl, odd_num, p) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + STM_PUSH_ROOT(tl, _odd_num); \ + STM_PUSH_ROOT(tl, p); \ +} while (0) + +#define STM_POP_MARKER(tl) ({ \ + object_t *_popped = STM_POP_ROOT_RET(tl); \ + STM_POP_ROOT_RET(tl); \ + _popped; \ +}) + +#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ + while (!(((uintptr_t)(_ss->ss)) & 1)) { \ + _ss--; \ + assert(_ss >= (tl).shadowstack_base); \ + } \ + _ss->ss = (object_t *)_odd_num; \ +} while (0) + +char *_stm_expand_marker(void); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py 
b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -28,6 +28,10 @@ int associated_segment_num; uint32_t events[]; float timing[]; + int longest_marker_state; + double longest_marker_time; + char longest_marker_self[]; + char longest_marker_other[]; ...; } stm_thread_local_t; @@ -118,6 +122,17 @@ #define STM_TIME_SYNC_PAUSE ... void stm_flush_timing(stm_thread_local_t *, int); + +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + +void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); +void stm_update_marker_num(stm_thread_local_t *, uintptr_t); +void stm_pop_marker(stm_thread_local_t *); +char *_stm_expand_marker(void); """) @@ -272,10 +287,24 @@ } } +void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) +{ + STM_PUSH_MARKER(*tl, onum, ob); +} + +void stm_update_marker_num(stm_thread_local_t *tl, uintptr_t onum) +{ + STM_UPDATE_MARKER_NUM(*tl, onum); +} + +void stm_pop_marker(stm_thread_local_t *tl) +{ + STM_POP_MARKER(*tl); +} + void stmcb_commit_soon() { } - ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_LARGEMALLOC_TEST', '1'), @@ -439,6 +468,8 @@ self.current_thread = 0 def teardown_method(self, meth): + lib.stmcb_expand_marker = ffi.NULL + lib.stmcb_debug_print = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! 
diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py new file mode 100644 --- /dev/null +++ b/c7/test/test_marker.py @@ -0,0 +1,340 @@ +from support import * +import py, time + +class TestMarker(BaseTest): + + def test_marker_odd_simple(self): + self.start_transaction() + self.push_root(ffi.cast("object_t *", 29)) + stm_minor_collect() + stm_major_collect() + # assert did not crash + x = self.pop_root() + assert int(ffi.cast("uintptr_t", x)) == 29 + + def test_abort_marker_no_shadowstack(self): + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_OUTSIDE_TRANSACTION + assert tl.longest_marker_time == 0.0 + # + self.start_transaction() + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == '\x00' + assert tl.longest_marker_other[0] == '\x00' + + def test_abort_marker_shadowstack(self): + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == '\x00' + assert tl.longest_marker_other[0] == '\x00' + + def test_abort_marker_no_shadowstack_cb(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + seen.append(1) + lib.stmcb_expand_marker = expand_marker + seen = [] + # + self.start_transaction() + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_self[0] == '\x00' + assert not seen + + def test_abort_marker_shadowstack_cb(self): + 
@ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) + assert ffi.string(tl.longest_marker_other) == '' + + def test_macros(self): + self.start_transaction() + p = stm_allocate(16) + tl = self.get_stm_thread_local() + lib.stm_push_marker(tl, 29, p) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 29) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + self.push_root(p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + lib.stm_pop_marker(tl) + py.test.raises(EmptyStack, self.pop_root) + + def test_stm_expand_marker(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + self.start_transaction() + p = stm_allocate(16) + 
self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.push_root(stm_allocate(32)) + self.push_root(stm_allocate(16)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '29 %r' % (p,) + + def test_stmcb_debug_print(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '<<<%d>>>\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + @ffi.callback("void(char *, double, char *)") + def debug_print(cause, time, marker): + if 0.0 < time < 1.0: + time = "time_ok" + seen.append((ffi.string(cause), time, ffi.string(marker))) + seen = [] + lib.stmcb_expand_marker = expand_marker + lib.stmcb_debug_print = debug_print + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.abort_transaction() + # + assert seen == [("run aborted other", "time_ok", "<<<29>>>")] + + def test_multiple_markers(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + seen.append(number) + s = '%d %r\x00' % (number, ptr == ffi.NULL) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + seen = [] + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 27)) + self.push_root(p) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '29 True' + assert seen == [29] + + def test_double_abort_markers_cb_write_write(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + 
self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, stm_set_char, p, 'B') + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE + assert ffi.string(tl.longest_marker_self) == '21' + assert ffi.string(tl.longest_marker_other) == '19' + + def test_double_abort_markers_cb_inevitable(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + c = (base + int(ffi.cast("uintptr_t", ptr)))[8] + s = '%d %r\x00' % (number, c) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'A') + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", p)) + self.become_inevitable() + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() + # + self.switch(1) + self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'B') + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", p)) + py.test.raises(Conflict, self.become_inevitable) + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE + assert ffi.string(tl.longest_marker_self) == "21 'B'" + assert ffi.string(tl.longest_marker_other) == "19 'A'" + + def test_read_write_contention(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, 
ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + assert stm_get_char(p) == '\x00' + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, self.commit_transaction) + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ + assert ffi.string(tl.longest_marker_self) == '19' + assert ffi.string(tl.longest_marker_other) == ( + '') + + def test_double_remote_markers_cb_write_write(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + tl0 = self.get_stm_thread_local() + # + self.switch(1) + self.start_transaction() + self.become_inevitable() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # aborts in #0 + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 23)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + # + py.test.raises(Conflict, self.switch, 0) + # + tl = self.get_stm_thread_local() + assert tl is tl0 + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE + assert 
ffi.string(tl.longest_marker_self) == '19' + assert ffi.string(tl.longest_marker_other) == '21' + + def test_double_remote_markers_cb_write_read(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + assert stm_get_char(p) == '\x00' # read + tl0 = self.get_stm_thread_local() + # + self.switch(1) + self.start_transaction() + self.become_inevitable() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'B') # write, will abort #0 + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 23)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + self.commit_transaction() + # + py.test.raises(Conflict, self.switch, 0) + # + tl = self.get_stm_thread_local() + assert tl is tl0 + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_READ + assert ffi.string(tl.longest_marker_self)=='' + assert ffi.string(tl.longest_marker_other) == '21' diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -1,7 +1,7 @@ from support import * import py -class TestBasic(BaseTest): +class TestNursery(BaseTest): def test_nursery_full(self): lib._stm_set_nursery_free_count(2048) From noreply at buildbot.pypy.org Sun May 4 21:04:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 May 2014 21:04:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/84f5fbe03d5d Message-ID: <20140504190428.759DB1D2371@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71266:df6ec66d4ecb Date: 2014-05-04 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/df6ec66d4ecb/ Log: import stmgc/84f5fbe03d5d diff --git 
a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -822e52f17647 +84f5fbe03d5d diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -306,6 +306,12 @@ STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { + /* We should commit soon, probably. This is kind of a + workaround for the broken stm_should_break_transaction of + pypy that doesn't want to commit any more after a minor + collection. It may, however, always be a good idea... */ + stmcb_commit_soon(); + /* 'STM_PSEGMENT->overflow_number' is used now by this collection, in the sense that it's copied to the overflow objects */ STM_PSEGMENT->overflow_number_has_been_used = true; From noreply at buildbot.pypy.org Sun May 4 22:51:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 22:51:21 +0200 (CEST) Subject: [pypy-commit] pypy default: allow jit to look inside buf.getslice if size is constant Message-ID: <20140504205121.0393A1C328C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71267:c1705fbd5f73 Date: 2014-05-04 16:32 -0400 http://bitbucket.org/pypy/pypy/changeset/c1705fbd5f73/ Log: allow jit to look inside buf.getslice if size is constant diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -1,7 +1,7 @@ """ Buffer protocol support. """ -from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib import jit class Buffer(object): @@ -21,6 +21,7 @@ "Returns the index'th character in the buffer." raise NotImplementedError # Must be overriden. No bounds checks. 
+ @jit.look_inside_iff(lambda self, start, stop, step, size: jit.isconstant(size)) def getslice(self, start, stop, step, size): # May be overridden. No bounds checks. return ''.join([self.getitem(i) for i in range(start, stop, step)]) diff --git a/rpython/rlib/rstruct/formatiterator.py b/rpython/rlib/rstruct/formatiterator.py --- a/rpython/rlib/rstruct/formatiterator.py +++ b/rpython/rlib/rstruct/formatiterator.py @@ -82,6 +82,7 @@ def finished(self): pass + class CalcSizeFormatIterator(FormatIterator): totalsize = 0 From noreply at buildbot.pypy.org Sun May 4 22:53:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 22:53:19 +0200 (CEST) Subject: [pypy-commit] pypy default: add a test_pypy_c for struct unpack from array Message-ID: <20140504205319.50AFE1D236E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71268:582d5e465ec2 Date: 2014-05-04 16:51 -0400 http://bitbucket.org/pypy/pypy/changeset/582d5e465ec2/ Log: add a test_pypy_c for struct unpack from array diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -25,3 +25,42 @@ guard_true(i69, descr=...) --TICK-- """) + + def test_struct_unpack(self): + def main(n): + import struct + import array + a = array.array('c', struct.pack('i', 42)) + i = 0 + while i < n: + i += 1 + struct.unpack('i', a) # ID: unpack + return i + log = self.run(main, [1000]) + assert log.result == 1000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('unpack', """ + guard_not_invalidated(descr=...) 
+ i93 = getarrayitem_raw(i55, 0, descr=) + i94 = getarrayitem_raw(i55, 1, descr=) + i95 = getarrayitem_raw(i55, 2, descr=) + i96 = getarrayitem_raw(i55, 3, descr=) + i97 = int_lshift(i94, 8) + i98 = int_or(i93, i97) + i99 = int_lshift(i95, 16) + i100 = int_or(i98, i99) + i101 = int_ge(i96, 128) + guard_false(i101, descr=...) + i102 = int_lshift(i96, 24) + i103 = int_or(i100, i102) + p104 = new(descr=) + p105 = new_array(0, descr=) + setfield_gc(p104, p105, descr=) + call(ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p104, 1, 1, descr=) + guard_no_exception(descr=...) + p106 = getfield_gc(p104, descr=) + i107 = getfield_raw(50657024, descr=) + setfield_gc(p104, 1, descr=) + i108 = int_lt(i107, 0) + guard_false(i108, descr=...) + """) From noreply at buildbot.pypy.org Sun May 4 23:11:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 4 May 2014 23:11:45 +0200 (CEST) Subject: [pypy-commit] pypy default: have ArrayBuffer implement getslice when possible Message-ID: <20140504211145.738341C02D9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71269:76427f9f6ae4 Date: 2014-05-04 17:11 -0400 http://bitbucket.org/pypy/pypy/changeset/76427f9f6ae4/ Log: have ArrayBuffer implement getslice when possible diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -612,6 +612,15 @@ data[index] = char array._charbuf_stop() + def getslice(self, start, stop, step, size): + if step == 1: + data = self.array._charbuf_start() + try: + return rffi.charpsize2str(rffi.ptradd(data, start), size) + finally: + self.array._charbuf_stop() + return Buffer.getslice(self, start, stop, step, size) + def get_raw_address(self): return self.array._charbuf_start() diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -21,7 +21,6 @@ "Returns the index'th character 
in the buffer." raise NotImplementedError # Must be overriden. No bounds checks. - @jit.look_inside_iff(lambda self, start, stop, step, size: jit.isconstant(size)) def getslice(self, start, stop, step, size): # May be overridden. No bounds checks. return ''.join([self.getitem(i) for i in range(start, stop, step)]) From noreply at buildbot.pypy.org Mon May 5 00:41:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 5 May 2014 00:41:52 +0200 (CEST) Subject: [pypy-commit] pypy default: update struct/array test_pypy_c Message-ID: <20140504224152.73F101C01CB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71270:fdd3a0ceb3e1 Date: 2014-05-04 18:41 -0400 http://bitbucket.org/pypy/pypy/changeset/fdd3a0ceb3e1/ Log: update struct/array test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -41,26 +41,22 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('unpack', """ guard_not_invalidated(descr=...) - i93 = getarrayitem_raw(i55, 0, descr=) - i94 = getarrayitem_raw(i55, 1, descr=) - i95 = getarrayitem_raw(i55, 2, descr=) - i96 = getarrayitem_raw(i55, 3, descr=) - i97 = int_lshift(i94, 8) - i98 = int_or(i93, i97) - i99 = int_lshift(i95, 16) - i100 = int_or(i98, i99) - i101 = int_ge(i96, 128) - guard_false(i101, descr=...) - i102 = int_lshift(i96, 24) - i103 = int_or(i100, i102) - p104 = new(descr=) - p105 = new_array(0, descr=) - setfield_gc(p104, p105, descr=) - call(ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p104, 1, 1, descr=) + p90 = newstr(4) + call(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) guard_no_exception(descr=...) - p106 = getfield_gc(p104, descr=) - i107 = getfield_raw(50657024, descr=) - setfield_gc(p104, 1, descr=) - i108 = int_lt(i107, 0) - guard_false(i108, descr=...) 
+ i91 = strgetitem(p90, 0) + i92 = strgetitem(p90, 1) + i93 = int_lshift(i92, 8) + i94 = int_or(i91, i93) + i95 = strgetitem(p90, 2) + i96 = int_lshift(i95, 16) + i97 = int_or(i94, i96) + i98 = strgetitem(p90, 3) + i99 = int_ge(i98, 128) + guard_false(i99, descr=...) + i100 = int_lshift(i98, 24) + i101 = int_or(i97, i100) + i102 = getfield_raw(50657056, descr=) + i103 = int_lt(i102, 0) + guard_false(i103, descr=...) """) From noreply at buildbot.pypy.org Mon May 5 07:38:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 07:38:15 +0200 (CEST) Subject: [pypy-commit] pypy default: capitalization and grammer (thanks Ryan) Message-ID: <20140505053815.B8CB21D2B78@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71271:01d8cef6d7b5 Date: 2014-05-05 08:36 +0300 http://bitbucket.org/pypy/pypy/changeset/01d8cef6d7b5/ Log: capitalization and grammer (thanks Ryan) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -33,7 +33,7 @@ .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html -.. _ `Numpy`: http://pypy.org/numpydonate.html +.. _ `NumPy`: http://pypy.org/numpydonate.html .. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html .. _`CFFI`: http://cffi.readthedocs.org .. _`cryptography`: https://cryptography.io @@ -66,8 +66,9 @@ Bugfixes -------- -Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider -performance slowdowns as bugs. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. Here is a summary of the user-facing changes; +for more information see `whats-new`_: * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. 
@@ -83,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -92,6 +93,8 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com +.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html + New Platforms and Features -------------------------- @@ -99,18 +102,18 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have separated rpython from the PyPy python - interpreter, and rpython is seeing use in other dynamic language projects. + large parts of the codebase. We have separated RPython from the PyPy python + interpreter, and RPython is seeing use in other dynamic language projects. * Support for precompiled headers in the build process for MSVC * Tweak support of errno in cpyext (the PyPy implemenation of the capi) -Numpy +NumPy ----- -Numpy support has been split into a builtin ``_numpy`` module and a -fork of the numpy code base adapted to pypy at +NumPy support has been split into a builtin ``_numpy`` module and a +fork of the NumPy code base adapted to PyPy at ``https://bitbucket.org/pypy/numpy``. You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; @@ -120,20 +123,20 @@ * NumPy support has been improved, many failures in indexing, dtypes, and scalars were corrected. We are slowly approaching our goal of passing - the numpy test suite. We still do not support object or unicode ndarrays. + the NumPy test suite. We still do not support object or unicode ndarrays. -* speed of iteration in dot() is now within 1.5x of the numpy c +* speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). 
Since the same array iterator is used throughout the ``_numpy`` module, speed increases should - be apparent in all Numpy functionality. + be apparent in all NumPy functionality. * Most of the core functionality of nditer has been implemented. -* A cffi-based ``numpy.random`` module is available as a branch in the numpy - repository, it will be merged soon after this release. +* A cffi-based ``numpy.random`` module is available as a branch; + it will be merged soon after this release. * enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load - memory operations used in numpy arrays. Further work remains here in virtualizing the + memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -1,5 +1,5 @@ ======================= -What's new in PyPy 2.2+ +What's new since PyPy 2.2.1? ======================= .. this is a revision shortly after release-2.2.x From noreply at buildbot.pypy.org Mon May 5 07:38:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 07:38:16 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: capitalization and grammer (thanks Ryan) Message-ID: <20140505053816.F39B71D2B78@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71272:5121f615ecc6 Date: 2014-05-05 08:36 +0300 http://bitbucket.org/pypy/pypy/changeset/5121f615ecc6/ Log: capitalization and grammer (thanks Ryan) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -33,7 +33,7 @@ .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html -.. _ `Numpy`: http://pypy.org/numpydonate.html +.. 
_ `NumPy`: http://pypy.org/numpydonate.html .. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html .. _`CFFI`: http://cffi.readthedocs.org .. _`cryptography`: https://cryptography.io @@ -66,8 +66,9 @@ Bugfixes -------- -Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider -performance slowdowns as bugs. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. Here is a summary of the user-facing changes; +for more information see `whats-new`_: * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. @@ -83,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -92,6 +93,8 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com +.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html + New Platforms and Features -------------------------- @@ -99,18 +102,18 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have separated rpython from the PyPy python - interpreter, and rpython is seeing use in other dynamic language projects. + large parts of the codebase. We have separated RPython from the PyPy python + interpreter, and RPython is seeing use in other dynamic language projects. 
* Support for precompiled headers in the build process for MSVC * Tweak support of errno in cpyext (the PyPy implemenation of the capi) -Numpy +NumPy ----- -Numpy support has been split into a builtin ``_numpy`` module and a -fork of the numpy code base adapted to pypy at +NumPy support has been split into a builtin ``_numpy`` module and a +fork of the NumPy code base adapted to PyPy at ``https://bitbucket.org/pypy/numpy``. You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; @@ -120,20 +123,20 @@ * NumPy support has been improved, many failures in indexing, dtypes, and scalars were corrected. We are slowly approaching our goal of passing - the numpy test suite. We still do not support object or unicode ndarrays. + the NumPy test suite. We still do not support object or unicode ndarrays. -* speed of iteration in dot() is now within 1.5x of the numpy c +* speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). Since the same array iterator is used throughout the ``_numpy`` module, speed increases should - be apparent in all Numpy functionality. + be apparent in all NumPy functionality. * Most of the core functionality of nditer has been implemented. -* A cffi-based ``numpy.random`` module is available as a branch in the numpy - repository, it will be merged soon after this release. +* A cffi-based ``numpy.random`` module is available as a branch; + it will be merged soon after this release. * enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load - memory operations used in numpy arrays. Further work remains here in virtualizing the + memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. 
diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -1,5 +1,5 @@ ======================= -What's new in PyPy 2.2+ +What's new since PyPy 2.2.1? ======================= .. this is a revision shortly after release-2.2.x From noreply at buildbot.pypy.org Mon May 5 09:03:54 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 09:03:54 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: add more macros (tests?) Message-ID: <20140505070354.491E61D23D8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71273:3f7cffa4d3ed Date: 2014-05-05 10:03 +0300 http://bitbucket.org/pypy/pypy/changeset/3f7cffa4d3ed/ Log: add more macros (tests?) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -902,7 +902,9 @@ ("SIZEOF_TIME_T", rffi.TIME_T), ("SIZEOF_LONG", rffi.LONG), ("SIZEOF_SHORT", rffi.SHORT), - ("SIZEOF_INT", rffi.INT) + ("SIZEOF_INT", rffi.INT), + ("SIZEOF_FLOAT", rffi.FLOAT), + ("SIZEOF_DOUBLE", rffi.DOUBLE), ]: pypy_macros.append("#define %s %s" % (macro_name, rffi.sizeof(size))) pypy_macros.append('') From noreply at buildbot.pypy.org Mon May 5 09:03:55 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 09:03:55 +0200 (CEST) Subject: [pypy-commit] pypy default: add more macros (tests?) Message-ID: <20140505070355.7E7CF1D23D8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71274:5ceef23a14b8 Date: 2014-05-05 10:03 +0300 http://bitbucket.org/pypy/pypy/changeset/5ceef23a14b8/ Log: add more macros (tests?) 
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -902,7 +902,9 @@ ("SIZEOF_TIME_T", rffi.TIME_T), ("SIZEOF_LONG", rffi.LONG), ("SIZEOF_SHORT", rffi.SHORT), - ("SIZEOF_INT", rffi.INT) + ("SIZEOF_INT", rffi.INT), + ("SIZEOF_FLOAT", rffi.FLOAT), + ("SIZEOF_DOUBLE", rffi.DOUBLE), ]: pypy_macros.append("#define %s %s" % (macro_name, rffi.sizeof(size))) pypy_macros.append('') From noreply at buildbot.pypy.org Mon May 5 09:50:44 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 09:50:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some extensions, do the XXXs Message-ID: <20140505075044.541211C328C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5227:e223b2a3a220 Date: 2014-05-05 09:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/e223b2a3a220/ Log: some extensions, do the XXXs diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -21,7 +21,7 @@ \conferenceinfo{ICOOOLPS workshop 2014}{July 28th, 2014, Uppsala, Sweden} \copyrightyear{2014} -\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} +%\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} \doi{nnnnnnn.nnnnnnn} % Uncomment one of the following two, if you are not going for the @@ -37,7 +37,7 @@ %% \titlebanner{banner above paper title} % These are ignored unless %% \preprintfooter{short description of paper} % 'preprint' option specified. -\title{The Way Forward in Parallelizing Dynamic Languages} +\title{The Way Forward in Parallelising Dynamic Languages} \subtitle{Position Paper, ICOOOLPS'14} \authorinfo{Remigius Meier} @@ -50,10 +50,21 @@ \maketitle \begin{abstract} -This is the text of the abstract. + Dynamic languages became very popular in recent years. 
At some + point, the need for concurrency arised, and many of them made the + choice to use a single global interpreter lock (GIL) to synchronize + the interpreter in a multithreading scenario. This choice however + makes it impossible to actually run code in parallel. + + Here we want to compare different approaches to replacing the GIL + with a technology that allows parallel execution. We look at + fine-grained locking, shared-nothing, and transactional memory (TM) + approaches. We argue that software-based TM systems are the most + promising, especially since they also enable the introduction of + atomic blocks as a better synchronization mechanism in the language. \end{abstract} -\category{CR-number}{subcategory}{third-level} +%\category{CR-number}{subcategory}{third-level} % general terms are not compulsory anymore, % you may leave them out @@ -123,6 +134,8 @@ \section{Discussion} +In this section we examine the approaches and highlight their +advantages and disadvantages. %% \paragraph{dynamic language VM problems} %% XXX: %% - high allocation rate (short lived objects)\\ @@ -141,9 +154,9 @@ code. While depending on this may not always be a good idea, it is done in practice. A GIL-replacement should therefore uphold these guarantees, while preferably also be as easily implementable as a GIL -for the interpreter. -[xxx mention that the interpreter is typically - very large and maintained by open-source communities] +for the interpreter. The latter can be especially important since +many of these languages are developed and maintained by very large +open-source communities, which are not easy to coordinate. The GIL also allows for easy integration with external C libraries that may not be thread-safe. For the duration of the calls, we @@ -343,12 +356,11 @@ locally by a program transformation\cite{felber07}. 
There are attempts to do the same for fine-grained locking\cite{bill06} but they require a whole program analysis since locks are inherently non-composable. -The effectiveness of these approaches still has to be proven for our -use case. -[xxx or maybe: "The effectiveness of these approaches is doubtful in our -use case --- for example, it makes it close to impossible to order the -locks consistently or to know in advance which locks a transaction will -need.] +The effectiveness of these approaches is doubtful in our use case, +since we execute bytecode instructions in any order defined by a +script only known at runtime. This makes it close to impossible to +order locks consistently or to know in advance which locks a +transaction will need. %% - overhead (100-1000\%) (barrier reference resolution, kills performance on low \#cpu) %% (FastLane: low overhead, not much gain)\\ From noreply at buildbot.pypy.org Mon May 5 10:52:45 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 5 May 2014 10:52:45 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: change title Message-ID: <20140505085245.E8CAA1C01DE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5228:008c9121c210 Date: 2014-05-05 10:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/008c9121c210/ Log: change title diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -37,7 +37,7 @@ %% \titlebanner{banner above paper title} % These are ignored unless %% \preprintfooter{short description of paper} % 'preprint' option specified. 
-\title{The Way Forward in Parallelising Dynamic Languages} +\title{A Way Forward in Parallelising Dynamic Languages} \subtitle{Position Paper, ICOOOLPS'14} \authorinfo{Remigius Meier} From noreply at buildbot.pypy.org Mon May 5 10:52:58 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 5 May 2014 10:52:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: a few typos Message-ID: <20140505085258.C23A41C01DE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5229:354573ba2558 Date: 2014-05-05 10:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/354573ba2558/ Log: a few typos diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -51,8 +51,8 @@ \begin{abstract} Dynamic languages became very popular in recent years. At some - point, the need for concurrency arised, and many of them made the - choice to use a single global interpreter lock (GIL) to synchronize + point, the need for concurrency arose, and many of them made the + choice to use a single global interpreter lock (GIL) to synchronise the interpreter in a multithreading scenario. This choice however makes it impossible to actually run code in parallel. @@ -61,7 +61,7 @@ fine-grained locking, shared-nothing, and transactional memory (TM) approaches. We argue that software-based TM systems are the most promising, especially since they also enable the introduction of - atomic blocks as a better synchronization mechanism in the language. + atomic blocks as a better synchronisation mechanism in the language. \end{abstract} %\category{CR-number}{subcategory}{third-level} @@ -116,9 +116,9 @@ %% designed with GIL semantics in mind. %% Furthermore, a solution to this problem should also bring better -%% synchronization mechanism with it... +%% synchronisation mechanism with it... 
-%% (supporting (large) atomic blocks for synchronization) +%% (supporting (large) atomic blocks for synchronisation) %% \subsection{Our Position} %% Current solutions for replacing the GIL include STM, HTM, and @@ -144,8 +144,8 @@ \subsection{Why is there a GIL?} The GIL is a very simple synchronisation mechanism for supporting -multithreading in the interpreter. The basic guarantee is that the GIL -may only be released in-between bytecode instructions. The interpreter +multithreading in an interpreter. The basic guarantee is that the GIL +may only be released in between bytecode instructions. The interpreter can thus rely on complete isolation and atomicity of these instructions. Additionally, it provides the application with a sequential consistency model\cite{lamport79}. As a consequence, @@ -413,7 +413,7 @@ are executing a program only known at runtime. Additionally, replacing the GIL means running every part of the application in transactions, so there is not much code that can run outside and that can be -optimized better. The performance of the TM system is vital. +optimised better. The performance of the TM system is vital. One way to get more performance is to develop STM systems that make better use of low-level features in existing OS kernels. 
We are From noreply at buildbot.pypy.org Mon May 5 10:53:49 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 5 May 2014 10:53:49 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: use a ~ (non-breaking space) before citations Message-ID: <20140505085349.AA15F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5230:9753ced909e1 Date: 2014-05-05 10:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/9753ced909e1/ Log: use a ~ (non-breaking space) before citations diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -148,7 +148,7 @@ may only be released in between bytecode instructions. The interpreter can thus rely on complete isolation and atomicity of these instructions. Additionally, it provides the application with a -sequential consistency model\cite{lamport79}. As a consequence, +sequential consistency model~\cite{lamport79}. As a consequence, applications can rely on certain operations to be atomic and that they will always be executed in the order in which they appear in the code. While depending on this may not always be a good idea, it is @@ -226,7 +226,7 @@ single-threaded case compared to the GIL, which requires much less acquire-release operations. -Jython\cite{webjython} is one project that implements an +Jython~\cite{webjython} is one project that implements an interpreter for Python on the JVM\footnote{Java Virtual Machine} and that uses fine-grained locking to correctly synchronise the interpreter. For a language like Python, one needs quite a few, @@ -298,7 +298,7 @@ desktop CPUs from Intel (Haswell generation). \paragraph{HTM} provides us with transactions like any TM system does. -It can be used as a direct replacement for the GIL\cite{nicholas06,odaira14,fuad10}. However, as is +It can be used as a direct replacement for the GIL~\cite{nicholas06,odaira14,fuad10}. 
However, as is common with hardware-only solutions, there are quite a few limitations that can not be lifted easily. For this comparison, we look at the implementation of Intel in recent Haswell generation CPUs. @@ -308,13 +308,13 @@ importantly it limits the amount of memory that can be accessed within a transaction. This transaction-length limitation makes it necessary to have a fallback in place in case this limit is reached. In recent -attempts, the usual fallback is the GIL\cite{odaira14,fuad10}. In our +attempts, the usual fallback is the GIL~\cite{odaira14,fuad10}. In our experiments, the current generation of HTM proved to be very fragile and thus needing the fallback very often. Consequently, scalability suffered a lot from this. The performance of HTM is pretty good as it does not introduce much -overhead ($<40\%$ overhead\cite{odaira14}). And it can transparently +overhead ($<40\%$ overhead~\cite{odaira14}). And it can transparently parallelise existing applications to some degree. The implementation is very straight-forward because it directly replaces the GIL in a central place. HTM is also directly compatible with any external @@ -332,11 +332,11 @@ \paragraph{STM} provides all the same benefits as HTM except for its performance. It is not unusual for the overhead introduced by STM to -be between 100\% to even 1000\% \cite{cascaval08,drago11}. While STM +be between 100\% to even 1000\% ~\cite{cascaval08,drago11}. While STM systems often scale very well to a big number of threads and eventually overtake the single-threaded execution, they often provide no benefits at all for low numbers of threads (1-8). There are some -attempts \cite{warmhoff13,spear09} that can reduce the overhead a lot, +attempts ~\cite{warmhoff13,spear09} that can reduce the overhead a lot, but scale badly or only for certain workloads. Often the benefits on more than one thread are too little in real world applications. 
@@ -353,8 +353,8 @@ While one can argue that STM requires the insertion of read and write barriers in the whole program, this can be done automatically and -locally by a program transformation\cite{felber07}. There are attempts -to do the same for fine-grained locking\cite{bill06} but they require +locally by a program transformation~\cite{felber07}. There are attempts +to do the same for fine-grained locking~\cite{bill06} but they require a whole program analysis since locks are inherently non-composable. The effectiveness of these approaches is doubtful in our use case, since we execute bytecode instructions in any order defined by a From noreply at buildbot.pypy.org Mon May 5 10:58:14 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 10:58:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: add real stmcb_commit_soon() Message-ID: <20140505085814.592FD1C155F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71275:cce0ed9a775c Date: 2014-05-05 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/cce0ed9a775c/ Log: add real stmcb_commit_soon() diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -23,6 +23,16 @@ pypy_stmcb_trace(obj, (void(*)(void*))visit); } +inline void stmcb_commit_soon() +{ + if (pypy_stm_nursery_low_fill_mark == (uintptr_t)-1) { + /* atomic */ + pypy_stm_nursery_low_fill_mark_saved = 0; + } else { + pypy_stm_nursery_low_fill_mark >>= 2; + } +} + /************************************************************/ /* "include" the stmgc.c file here */ @@ -147,7 +157,7 @@ transaction. 
*/ assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); stm_commit_transaction(); @@ -182,7 +192,7 @@ transaction whose jmpbuf points into this function */ if (pypy_stm_ready_atomic == 1) { - assert(pypy_stm_nursery_low_fill_mark != 0); + //assert(pypy_stm_nursery_low_fill_mark != 0); assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); stm_commit_transaction(); pypy_stm_nursery_low_fill_mark = 0; @@ -196,7 +206,7 @@ } /* double-check */ if (pypy_stm_ready_atomic == 1) { - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); } else { @@ -234,5 +244,3 @@ _pypy_stm_inev_state(); stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); } - -void stmcb_commit_soon(void) { /*XXX FIXME*/ } diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -73,7 +73,7 @@ case 1: pypy_stm_nursery_low_fill_mark = pypy_stm_nursery_low_fill_mark_saved; assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); break; case 0: From noreply at buildbot.pypy.org Mon May 5 12:09:55 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 5 May 2014 12:09:55 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: a few tweaks, a few places where more citations are needed Message-ID: <20140505100955.32CFD1C01DE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5231:6698e6fce68f Date: 2014-05-05 12:09 +0200 http://bitbucket.org/pypy/extradoc/changeset/6698e6fce68f/ Log: a few tweaks, a few places where more citations are needed diff --git 
a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -10,8 +10,19 @@ \usepackage[utf8]{inputenc} \usepackage{array} +\usepackage{color} \usepackage{hyperref} \usepackage{amsmath} +\usepackage{amssymb} + +\newcommand{\mynote}[2]{% + \textcolor{red}{% + \fbox{\bfseries\sffamily\scriptsize#1}% + {\small$\blacktriangleright$\textsf{\emph{#2}}$\blacktriangleleft$}% + }% +} + +\newcommand\cfbolz[1]{\mynote{Carl Friedrich}{#1}} \begin{document} @@ -174,11 +185,12 @@ it is not exposed to the application running on top of it. To synchronise memory accesses in applications using threads, the state-of-the-art still means explicit locking everywhere. It is well -known that using locks for synchronisation is not easy. They are +known that using locks for synchronisation is not easy\cfbolz{citation needed +:-). would be cool if you could find something}. They are non-composable, have overhead, may deadlock, limit scalability, and -overall add a lot of complexity. For a better parallel programming +overall add a lot of complexity\cfbolz{same here, really}. For a better parallel programming model for dynamic languages, we propose another, well-known -synchronisation mechanism called \emph{atomic blocks}. +synchronisation mechanism called \emph{atomic blocks}\cfbolz{and here}. Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a @@ -199,7 +211,7 @@ \begin{description} \item[Performance:] How well does the approach perform compared to the - GIL on single and multiple threads? + GIL on a single and on multiple threads? \item[Existing applications:] How big are the changes required to integrate with and parallelise existing applications? 
\item[Better synchronisation:] Does the approach enable better @@ -243,6 +255,7 @@ It does however not provide a better synchronisation mechanism to the application like e.g. atomic blocks. +\cfbolz{I think you should mention the commented out point below, that a lot of existing code contains latent races / deadlocks that are just not exposed in a GIL-full world} %% - support of atomic blocks?\\ %% - hard to get right (deadlocks, performance, lock-granularity)\\ %% - very hard to get right for a large language\\ @@ -295,7 +308,7 @@ capabilities as software-only approaches but with different performance characteristics. We will now first look at HTM, which recently gained a lot of popularity by its introduction in common -desktop CPUs from Intel (Haswell generation). +desktop CPUs from Intel (Haswell generation).\cfbolz{citation} \paragraph{HTM} provides us with transactions like any TM system does. It can be used as a direct replacement for the GIL~\cite{nicholas06,odaira14,fuad10}. However, as is @@ -341,7 +354,7 @@ on more than one thread are too little in real world applications. However, STM compared to HTM does not suffer from the same restricting -limitations. Transactions can be arbitrarily long. This makes it +limitations. Transactions can in principle be arbitrarily long. This makes it possible to actually expose transactions to the application in the form of atomic blocks. 
This is the only approach that enables a better synchronisation mechanism than locks for applications \emph{and} still From noreply at buildbot.pypy.org Mon May 5 12:11:25 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 5 May 2014 12:11:25 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: move the table up, to force latex to put it into a sensible place Message-ID: <20140505101125.0ABB61C01DE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5232:8a17d33447d1 Date: 2014-05-05 12:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/8a17d33447d1/ Log: move the table up, to force latex to put it into a sensible place diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -143,6 +143,31 @@ %% language. STM is the best way forward but has bad performance, so we %% fix that. + +\begin{table*}[ht] + \centering + \begin{tabular}{|l|c|c|c|c|c|} + \hline + & \textbf{GIL} & \textbf{Fine-grained locking} + & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ + \hline + Performance (single threaded) & ++ & + & ++ & ++ & -{-} \\ + \hline + Performance (multithreaded) & -{-} & + & + & + & + \\ + \hline + Existing applications & ++ & ++ & -{-} & ++ & ++ \\ + \hline + Better synchronisation & - & - & - & - & ++ \\ + \hline + Implementation & ++ & - & ++ & ++ & ++ \\ + \hline + External libraries & ++ & ++ & ++ & ++ & ++ \\ + \hline + \end{tabular} + \caption{Comparison between the approaches (-{-}/-/o/+/++)} + \label{tab:comparison} +\end{table*} + \section{Discussion} In this section we examine the approaches and highlight their @@ -383,30 +408,6 @@ \section{The Way Forward} -\begin{table*}[h] - \centering - \begin{tabular}{|l|c|c|c|c|c|} - \hline - & \textbf{GIL} & \textbf{Fine-grained locking} - & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ - \hline - Performance (single threaded) & ++ & 
+ & ++ & ++ & -{-} \\ - \hline - Performance (multithreaded) & -{-} & + & + & + & + \\ - \hline - Existing applications & ++ & ++ & -{-} & ++ & ++ \\ - \hline - Better synchronisation & - & - & - & - & ++ \\ - \hline - Implementation & ++ & - & ++ & ++ & ++ \\ - \hline - External libraries & ++ & ++ & ++ & ++ & ++ \\ - \hline - \end{tabular} - \caption{Comparison between the approaches (-{-}/-/o/+/++)} - \label{tab:comparison} -\end{table*} - Following the above argumentation for each approach we assembled a general overview in Table \ref{tab:comparison}. The general picture is From noreply at buildbot.pypy.org Mon May 5 12:53:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 12:53:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Move this check earlier (random attempt to avoid SIGILL on very very old x86 machines) Message-ID: <20140505105316.383611C3569@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71276:3484aaa1e858 Date: 2014-05-05 12:52 +0200 http://bitbucket.org/pypy/pypy/changeset/3484aaa1e858/ Log: Move this check earlier (random attempt to avoid SIGILL on very very old x86 machines) diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -35,9 +35,6 @@ pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif pypy_asm_stack_bottom(); -#ifdef PYPY_X86_CHECK_SSE2_DEFINED - pypy_x86_check_sse2(); -#endif instrument_setup(); #ifndef MS_WINDOWS @@ -83,6 +80,9 @@ int PYPY_MAIN_FUNCTION(int argc, char *argv[]) { +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif return pypy_main_function(argc, argv); } From noreply at buildbot.pypy.org Mon May 5 13:41:11 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 13:41:11 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more citations Message-ID: 
<20140505114111.A55A61C01DE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5233:9c837c7672df Date: 2014-05-05 13:41 +0200 http://bitbucket.org/pypy/extradoc/changeset/9c837c7672df/ Log: more citations diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -157,7 +157,7 @@ \hline Existing applications & ++ & ++ & -{-} & ++ & ++ \\ \hline - Better synchronisation & - & - & - & - & ++ \\ + Better synchronisation & o & o & o & - & ++ \\ \hline Implementation & ++ & - & ++ & ++ & ++ \\ \hline @@ -210,12 +210,13 @@ it is not exposed to the application running on top of it. To synchronise memory accesses in applications using threads, the state-of-the-art still means explicit locking everywhere. It is well -known that using locks for synchronisation is not easy\cfbolz{citation needed -:-). would be cool if you could find something}. They are -non-composable, have overhead, may deadlock, limit scalability, and -overall add a lot of complexity\cfbolz{same here, really}. For a better parallel programming +known that using locks for synchronisation is not +easy~\cite{christopher10,victor11,shan08}. They are non-composable, +have overhead, may deadlock, limit scalability, and add to the overall +complexity of the program logic. For a better parallel programming model for dynamic languages, we propose another, well-known -synchronisation mechanism called \emph{atomic blocks}\cfbolz{and here}. +synchronisation mechanism called \emph{atomic + blocks}~\cite{tim03,tim05}. Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a @@ -522,6 +523,34 @@ correctly executes multiprocess programs." \emph{Computers, IEEE Transactions} on 100.9 (1979): 690-691. +\bibitem{victor11} + Victor Pankratius and Ali-Reza Adl-Tabatabai. 2011. 
A study of + transactional memory vs. locks in practice. In \emph{Proceedings of + the twenty-third annual ACM symposium on Parallelism in algorithms + and architectures} (SPAA '11). ACM, New York, NY, USA + +\bibitem{christopher10} + Christopher J. Rossbach, Owen S. Hofmann, and Emmett + Witchel. 2010. Is transactional programming actually + easier?. \emph{SIGPLAN} Not. 45, 5 (January 2010), 47-56. + +\bibitem{tim03} + Tim Harris and Keir Fraser. 2003. Language support for lightweight + transactions. \emph{In Proceedings of the 18th annual ACM SIGPLAN + conference on Object-oriented programing, systems, languages, and + applications} (OOPSLA '03). + +\bibitem{tim05} + Tim Harris, Simon Marlow, Simon Peyton-Jones, and Maurice + Herlihy. 2005. Composable memory transactions. \emph{In Proceedings + of the tenth ACM SIGPLAN symposium on Principles and practice of + parallel programming} (PPoPP '05). + +\bibitem{shan08} + Shan Lu, Soyeon Park, Eunsoo Seo, and Yuanyuan Zhou. 2008. Learning + from mistakes: a comprehensive study on real world concurrency bug + characteristics. \emph{SIGARCH Comput. Archit. News} 36, 1 (March 2008), + 329-339. \end{thebibliography} From noreply at buildbot.pypy.org Mon May 5 13:53:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 13:53:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: another comment Message-ID: <20140505115352.E04221C155F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5234:18ba53e1a5b4 Date: 2014-05-05 13:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/18ba53e1a5b4/ Log: another comment diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -274,14 +274,15 @@ however be very simple too. One could simply use one lock per library to avoid this issue. 
-In the end, fine-grained locking can transparently replace the GIL -and therefore parallelise existing applications, generally without any +In the end, fine-grained locking can transparently replace the GIL and +therefore parallelise existing applications, generally without any changes\footnote{There are rare cases where not having atomic -bytecodes actually changes the semantics.} -It does however not provide a better synchronisation -mechanism to the application like e.g. atomic blocks. + bytecodes actually changes the semantics.}. An implementation has to +follow the GIL semantics very closely, otherwise it may expose some +latent data races in existing applications which are just not exposed +with a GIL. This approach does however not provide a better parallelising +synchronisation mechanism to the application like e.g. atomic blocks. -\cfbolz{I think you should mention the commented out point below, that a lot of existing code contains latent races / deadlocks that are just not exposed in a GIL-full world} %% - support of atomic blocks?\\ %% - hard to get right (deadlocks, performance, lock-granularity)\\ %% - very hard to get right for a large language\\ From noreply at buildbot.pypy.org Mon May 5 13:57:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 13:57:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: another citation Message-ID: <20140505115734.8118C1C155F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5235:09622858923e Date: 2014-05-05 13:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/09622858923e/ Log: another citation diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -335,7 +335,7 @@ capabilities as software-only approaches but with different performance characteristics. 
We will now first look at HTM, which recently gained a lot of popularity by its introduction in common -desktop CPUs from Intel (Haswell generation).\cfbolz{citation} +desktop CPUs from Intel (Haswell generation)~\cite{odaira14,leis14}. \paragraph{HTM} provides us with transactions like any TM system does. It can be used as a direct replacement for the GIL~\cite{nicholas06,odaira14,fuad10}. However, as is @@ -553,6 +553,11 @@ characteristics. \emph{SIGARCH Comput. Archit. News} 36, 1 (March 2008), 329-339. +\bibitem{leis14} + Leis, Viktor, Alfons Kemper, and Thomas Neumann. "Exploiting + Hardware Transactional Memory in Main-Memory Databases." + \emph{Proc. of ICDE}. 2014. + \end{thebibliography} From noreply at buildbot.pypy.org Mon May 5 13:59:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 13:59:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Correctly reset the state between iterations. Before this change, successive iterations would take slightly longer and Message-ID: <20140505115923.9FF6F1C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71277:c1d1f4dd5f5f Date: 2014-05-05 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/c1d1f4dd5f5f/ Log: Correctly reset the state between iterations. Before this change, successive iterations would take slightly longer and longer to execute, with no bound; roughly 2-4% slower every 500 iterations. 
diff --git a/rpython/translator/goal/richards.py b/rpython/translator/goal/richards.py --- a/rpython/translator/goal/richards.py +++ b/rpython/translator/goal/richards.py @@ -144,6 +144,9 @@ class TaskWorkArea(object): def __init__(self): + self.reset() + + def reset(self): self.taskTab = [None] * TASKTABSIZE self.taskList = None @@ -151,7 +154,6 @@ self.holdCount = 0 self.qpktCount = 0 -taskWorkArea = TaskWorkArea() class Task(TaskState): @@ -361,8 +363,7 @@ def run(self, iterations): for i in xrange(iterations): - taskWorkArea.holdCount = 0 - taskWorkArea.qpktCount = 0 + taskWorkArea.reset() IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec()) From noreply at buildbot.pypy.org Mon May 5 13:59:40 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 13:59:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: at least give HTM a 0 too Message-ID: <20140505115940.685C61C155F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5236:788db02a12d7 Date: 2014-05-05 13:59 +0200 http://bitbucket.org/pypy/extradoc/changeset/788db02a12d7/ Log: at least give HTM a 0 too diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -157,7 +157,7 @@ \hline Existing applications & ++ & ++ & -{-} & ++ & ++ \\ \hline - Better synchronisation & o & o & o & - & ++ \\ + Better synchronisation & o & o & o & o & ++ \\ \hline Implementation & ++ & - & ++ & ++ & ++ \\ \hline From noreply at buildbot.pypy.org Mon May 5 14:15:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 14:15:43 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Update to use the new 'atomic' module Message-ID: <20140505121543.7FEBA1C130C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r256:be0306e43d45 Date: 2014-05-03 13:46 +0200 http://bitbucket.org/pypy/benchmarks/changeset/be0306e43d45/ Log: 
Update to use the new 'atomic' module diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -2,12 +2,7 @@ from threading import Thread, Condition, Lock, local import thread, atexit, sys, time -try: - from __pypy__.thread import atomic, getsegmentlimit -except ImportError: - atomic = Lock() - def getsegmentlimit(): - return 1 +from atomic import atomic, getsegmentlimit, print_abort_info class TLQueue_concurrent(object): diff --git a/multithread/threadworms/threadworms.py b/multithread/threadworms/threadworms.py --- a/multithread/threadworms/threadworms.py +++ b/multithread/threadworms/threadworms.py @@ -6,7 +6,7 @@ # This is meant to be an educational example of multithreaded programming, # so I get kind of verbose in the comments. -from common.abstract_threading import atomic, Future +from common.abstract_threading import atomic, Future, print_abort_info import time import random, sys, threading @@ -61,6 +61,7 @@ self.direction = self.rnd.choice((UP, DOWN, LEFT, RIGHT)) with atomic: + print_abort_info(0.01) # GRID_LOCK.acquire() # don't return (that is, block) until this thread can acquire the lock nextx, nexty = self.getNextPosition() From noreply at buildbot.pypy.org Mon May 5 14:15:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 14:15:44 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Fix the multithreaded richards as done in c1d1f4dd5f5f for translator/goal/richards.py. Message-ID: <20140505121544.E340F1C130C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r257:d047b054ff7f Date: 2014-05-05 14:15 +0200 http://bitbucket.org/pypy/benchmarks/changeset/d047b054ff7f/ Log: Fix the multithreaded richards as done in c1d1f4dd5f5f for translator/goal/richards.py. 
diff --git a/multithread/multithread-richards.py b/multithread/multithread-richards.py --- a/multithread/multithread-richards.py +++ b/multithread/multithread-richards.py @@ -370,7 +370,6 @@ def __init__(self): self.finished_lock = thread.allocate_lock() self.finished_lock.acquire() - self.taskWorkArea = TaskWorkArea() def run_and_unlock(self, to_do): os.write(1, 'running...\n') @@ -382,15 +381,14 @@ except IndexError: break iterations += 1 - self.result = self.run(self.taskWorkArea) + self.result = self.run() os.write(1, 'done, iterations=%d, result=%r\n' % (iterations, self.result)) self.finished_lock.release() - def run(self, taskWorkArea): + def run(self): #with atomic: if 1: - taskWorkArea.holdCount = 0 - taskWorkArea.qpktCount = 0 + taskWorkArea = TaskWorkArea() IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec(), taskWorkArea) From noreply at buildbot.pypy.org Mon May 5 14:17:08 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 14:17:08 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: emphasize atomic block parallelisation Message-ID: <20140505121708.07EE71C130C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5237:458ea996a081 Date: 2014-05-05 14:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/458ea996a081/ Log: emphasize atomic block parallelisation diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -119,7 +119,7 @@ overall winner. While it has a big performance problem currently, it gets more points in all the other categories. We think that it is the only solution that also provides a better synchronisation mechanism to -the application in the form of atomic blocks. +the application in the form of paralleliseable atomic blocks. 
%% \subsection{Issue} %% The issue that we want to discuss is how to efficiently support @@ -209,9 +209,9 @@ Since the GIL is mostly an implementation detail of the interpreter, it is not exposed to the application running on top of it. To synchronise memory accesses in applications using threads, the -state-of-the-art still means explicit locking everywhere. It is well -known that using locks for synchronisation is not -easy~\cite{christopher10,victor11,shan08}. They are non-composable, +state-of-the-art still means explicit locking everywhere. It is +known that using locks for synchronisation can be hard at +times~\cite{christopher10,victor11,shan08}. They are non-composable, have overhead, may deadlock, limit scalability, and add to the overall complexity of the program logic. For a better parallel programming model for dynamic languages, we propose another, well-known @@ -220,12 +220,12 @@ Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a -series of instructions. Interpreters using a GIL can simply guarantee +series of instructions. Interpreters using a GIL can simply guarantee that the GIL is not released during the execution of the atomic block. Of course, this still means that no two atomic blocks can -execute in parallel or even concurrently. Potential solutions that -provide a good way to implement atomic blocks are therefore -preferable. +execute in parallel or even concurrently. Potential solutions are +preferable if they provide a good way to implement atomic blocks that +are also able to be executed in parallel. @@ -240,7 +240,7 @@ GIL on a single and on multiple threads? \item[Existing applications:] How big are the changes required to integrate with and parallelise existing applications? 
-\item[Better synchronisation:] Does the approach enable better +\item[Better synchronisation:] Does the approach enable better, paralleliseable synchronisation mechanisms for applications (e.g. atomic blocks)? \item[Implementation:] How difficult is it to implement the approach in the interpreter? @@ -362,7 +362,7 @@ multiple threads. The one thing that is missing is support for a better synchronisation mechanism for the application. It is not possible in general to expose the hardware-transactions to the -application in the form of atomic blocks because that would require +application in the form of atomic blocks, because that would require much longer transactions. %% - false-sharing on cache-line level\\ From noreply at buildbot.pypy.org Mon May 5 14:17:09 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 14:17:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add pdf Message-ID: <20140505121709.3153C1C130C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5238:642543cf5085 Date: 2014-05-05 14:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/642543cf5085/ Log: add pdf diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8a9a6a4ac4f35959a4eec538ef2bc6d20ef057b1 GIT binary patch [cut] From noreply at buildbot.pypy.org Mon May 5 15:27:44 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 15:27:44 +0200 (CEST) Subject: [pypy-commit] benchmarks default: extend mandelbrot.py Message-ID: <20140505132744.2885C1D23C3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r258:f7ba852bdc89 Date: 2014-05-05 15:27 +0200 http://bitbucket.org/pypy/benchmarks/changeset/f7ba852bdc89/ Log: extend mandelbrot.py diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ 
-52,23 +52,25 @@ return res -def run(threads=2): +def run(threads=2, stripes=16): threads = int(threads) + stripes = int(stripes) + assert stripes >= threads ar, ai = -2.0, -1.5 br, bi = 1.0, 1.5 width, height = 4096, 4096 set_thread_pool(ThreadPool(threads)) - step = (bi - ai) / threads + step = (bi - ai) / stripes res = [] ai = -1.5 bi = ai + step parallel_time = time.time() - for i in xrange(threads): + for i in xrange(stripes): res.append(Future(calculate, a=(ar, ai + i * step), b=(br, bi + i * step), - im_size=(width, int(height / threads)) + im_size=(width, int(height / stripes)) )) res = [f() for f in res] From noreply at buildbot.pypy.org Mon May 5 16:11:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 16:11:16 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Tweaks tweaks tweaks Message-ID: <20140505141116.477B31C130C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5239:fbc8c4259336 Date: 2014-05-05 16:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/fbc8c4259336/ Log: Tweaks tweaks tweaks diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index 8a9a6a4ac4f35959a4eec538ef2bc6d20ef057b1..d0e11f70c50dc0b0e812b86a8ade78182e267737 GIT binary patch [cut] diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -72,7 +72,8 @@ fine-grained locking, shared-nothing, and transactional memory (TM) approaches. We argue that software-based TM systems are the most promising, especially since they also enable the introduction of - atomic blocks as a better synchronisation mechanism in the language. + large, parallelisable atomic blocks as a better synchronisation + mechanism in the language. 
\end{abstract} %\category{CR-number}{subcategory}{third-level} @@ -111,7 +112,7 @@ The approach that wins in the end should perform similarly for single-threaded execution as compared to the GIL and be able to execute code in parallel on multiple cores. Furthermore, we will also -take into account the compatibility to existing code that already uses +take into account the compatibility with existing code that may already use threads for concurrency, as well as the changes that are required to the interpreter itself. @@ -119,7 +120,7 @@ overall winner. While it has a big performance problem currently, it gets more points in all the other categories. We think that it is the only solution that also provides a better synchronisation mechanism to -the application in the form of paralleliseable atomic blocks. +the application in the form of parallelisable atomic blocks. %% \subsection{Issue} %% The issue that we want to discuss is how to efficiently support @@ -157,7 +158,7 @@ \hline Existing applications & ++ & ++ & -{-} & ++ & ++ \\ \hline - Better synchronisation & o & o & o & o & ++ \\ + Better synchronisation & o & o & & o & ++ \\ \hline Implementation & ++ & - & ++ & ++ & ++ \\ \hline @@ -265,11 +266,13 @@ acquire-release operations. Jython~\cite{webjython} is one project that implements an -interpreter for Python on the JVM\footnote{Java Virtual Machine} and -that uses fine-grained locking to correctly synchronise the +interpreter for Python on the Java Virtual Machine (JVM) and +that uses fine-grained locking\footnote{The performance impact of +fine-grained locking is milder in Java than it would be in a typical piece +of C code; see e.g.~\cite{biased}.} to correctly synchronise the interpreter. For a language like Python, one needs quite a few, carefully placed locks. Since there is no central location, the -complexity of the implementation is quite a bit greater compared to +complexity of the implementation is quite a bit larger compared to using a GIL. 
Integrating external, non-thread-safe libraries should however be very simple too. One could simply use one lock per library to avoid this issue. @@ -296,25 +299,25 @@ to replace it. If an application can be split into completely independent parts that only very rarely need to share anything, or only do so via an external program like a database, then it is -sensible to have one GIL per independent part. As an example of such -an approach we look at the +sensible to have one GIL per independent part. At the extreme, there +are applications that parallelise perfectly simply by running +independent processes; some web servers and some numeric computations +do. We will consider here a slightly more general approach: the \emph{multiprocessing}\footnote{https://docs.python.org/2/library/multiprocessing.html} module of Python. In essence, it uses process-forking to provide the application with multiple interpreters that can run in parallel. Communication is then done explicitly through pipes. -Obviously not every application fits well into this model and its -applicability is therefore quite limited. Performance is good as +The model of explicit communication is sometimes seen as a superior +way to synchronise concurrent applications because of its explicitness. +However, not every application fits well into this model and its +applicability is therefore limited. Performance is good as long as the application does not need to communicate a lot, because inter-process communication is relatively expensive. Also the implementation of this approach is very cheap since one can actually take an unmodified GIL-supported interpreter and run -multiple of them in parallel. That way, we also inherit the +several of them in parallel. That way, we also inherit the easy integration of external libraries without any changes. 
-While the model of explicit communication is often seen as a -superior way to synchronise concurrent applications because -of its explicitness, it does not actually introduce a better -synchronisation mechanism for applications. %% - often needs major restructuring of programs (explicit data exchange)\\ %% - sometimes communication overhead is too large\\ @@ -361,9 +364,9 @@ library that needs to be integrated and synchronised for use in multiple threads. The one thing that is missing is support for a better synchronisation mechanism for the application. It is not -possible in general to expose the hardware-transactions to the -application in the form of atomic blocks, because that would require -much longer transactions. +reasonable in general to expose the hardware-transactions to the +application in the form of atomic blocks, because doing so would +require the system to support much longer transactions. %% - false-sharing on cache-line level\\ %% - limited capacity (caches, undocumented)\\ @@ -378,7 +381,7 @@ no benefits at all for low numbers of threads (1-8). There are some attempts ~\cite{warmhoff13,spear09} that can reduce the overhead a lot, but scale badly or only for certain workloads. Often the benefits -on more than one thread are too little in real world applications. +on more than one thread are too small in real world applications. However, STM compared to HTM does not suffer from the same restricting limitations. Transactions can in principle be arbitrarily long. This makes it @@ -391,12 +394,13 @@ parallel programming forward. Together with sequential consistency it provides a lot of simplification for parallel applications. 
-While one can argue that STM requires the insertion of read and write -barriers in the whole program, this can be done automatically and +On the implementation level, +while one can argue that STM requires the insertion of read and write +barriers in the whole interpreter, this can be done automatically and locally by a program transformation~\cite{felber07}. There are attempts to do the same for fine-grained locking~\cite{bill06} but they require -a whole program analysis since locks are inherently non-composable. -The effectiveness of these approaches is doubtful in our use case, +a whole program analysis since locks are inherently non-composable +--- and their effectiveness is doubtful in our use case, since we execute bytecode instructions in any order defined by a script only known at runtime. This makes it close to impossible to order locks consistently or to know in advance which locks a @@ -438,7 +442,7 @@ tailor the system to the discussed use case which gives us an advantage over other STM systems that are more general. With this approach, initial results suggest that we can keep the overhead of STM -below 50\%. A hybrid TM system, which also uses HTM to accelerate +well below 50\%. A hybrid TM system, which also uses HTM to accelerate certain tasks, looks like a very promising direction of research too. We believe that further work to reduce the overhead of STM is very worthwhile. @@ -558,6 +562,13 @@ Hardware Transactional Memory in Main-Memory Databases." \emph{Proc. of ICDE}. 2014. +\bibitem{biased} + Kenneth Russell and David Detlefs. 2006. Eliminating + synchronization-related atomic operations with biased locking and + bulk rebiasing. \emph{In Proceedings of the 21st annual ACM SIGPLAN + conference on Object-oriented programing, systems, languages, and + applications} (OOPSLA '06). 
+ \end{thebibliography} From noreply at buildbot.pypy.org Mon May 5 16:25:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 16:25:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix Message-ID: <20140505142528.9044B1C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71278:1034226b87f2 Date: 2014-05-05 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/1034226b87f2/ Log: Fix diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include From noreply at buildbot.pypy.org Mon May 5 16:31:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 16:31:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Move the table to page 3. Message-ID: <20140505143143.922041C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5240:11071f8ef812 Date: 2014-05-05 16:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/11071f8ef812/ Log: Move the table to page 3. diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index d0e11f70c50dc0b0e812b86a8ade78182e267737..61eddb1d20c2838e3b9a63f29aa5080dbda0a2aa GIT binary patch [cut] diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -145,30 +145,6 @@ %% fix that. 
-\begin{table*}[ht] - \centering - \begin{tabular}{|l|c|c|c|c|c|} - \hline - & \textbf{GIL} & \textbf{Fine-grained locking} - & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ - \hline - Performance (single threaded) & ++ & + & ++ & ++ & -{-} \\ - \hline - Performance (multithreaded) & -{-} & + & + & + & + \\ - \hline - Existing applications & ++ & ++ & -{-} & ++ & ++ \\ - \hline - Better synchronisation & o & o & & o & ++ \\ - \hline - Implementation & ++ & - & ++ & ++ & ++ \\ - \hline - External libraries & ++ & ++ & ++ & ++ & ++ \\ - \hline - \end{tabular} - \caption{Comparison between the approaches (-{-}/-/o/+/++)} - \label{tab:comparison} -\end{table*} - \section{Discussion} In this section we examine the approaches and highlight their @@ -230,6 +206,32 @@ +\begin{table*}[ht] + \centering + \begin{tabular}{|l|c|c|c|c|c|} + \hline + & \textbf{GIL} & \textbf{Fine-grained locking} + & \textbf{Shared-nothing} & \textbf{HTM} & \textbf{STM}\\ + \hline + Performance (single threaded) & ++ & + & ++ & ++ & -{-} \\ + \hline + Performance (multithreaded) & -{-} & + & + & + & + \\ + \hline + Existing applications & ++ & ++ & -{-} & ++ & ++ \\ + \hline + Better synchronisation & o & o & & o & ++ \\ + \hline + Implementation & ++ & - & ++ & ++ & ++ \\ + \hline + External libraries & ++ & ++ & ++ & ++ & ++ \\ + \hline + \end{tabular} + \caption{Comparison between the approaches (-{-}/-/o/+/++)} + \label{tab:comparison} +\end{table*} + + + \subsection{Potential Solutions} For the discussion we define a set of criteria to evaluate the From noreply at buildbot.pypy.org Mon May 5 17:00:23 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 17:00:23 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add concluding sentence Message-ID: <20140505150023.83D2F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5241:379da0f357d3 Date: 2014-05-05 17:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/379da0f357d3/ Log: 
add concluding sentence diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index 61eddb1d20c2838e3b9a63f29aa5080dbda0a2aa..d50af6d75293ebca7607edd49d1c57321cf7e97f GIT binary patch [cut] diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -417,7 +417,7 @@ \section{The Way Forward} -Following the above argumentation for each approach we assembled a +Following the above argumentation for each approach, we assembled a general overview in Table \ref{tab:comparison}. The general picture is everything else than clear. It looks like HTM may be a good solution to replace the GIL in the near future. Current implementations are @@ -446,10 +446,15 @@ approach, initial results suggest that we can keep the overhead of STM well below 50\%. A hybrid TM system, which also uses HTM to accelerate certain tasks, looks like a very promising direction of research -too. We believe that further work to reduce the overhead of STM is -very worthwhile. +too. - +We believe that further work to reduce the overhead of STM is +very worthwhile. In fact, considering some analogies that have been +drawn between garbage collection and transactional memory \cite{dan07}, +we believe that it is worthwhile to focus the STM research more +specifically on the context shown in this paper --- for use in +implementations of high-level languages, rather than as a tool +directly used by the programmer. %% possible solution:\\ @@ -475,6 +480,12 @@ \begin{thebibliography}{} \softraggedright +\bibitem{dan07} + Dan Grossman. 2007. The transactional memory / garbage collection + analogy. \emph{In Proceedings of the 22nd annual ACM SIGPLAN + conference on Object-oriented programming systems and + applications} (OOPSLA '07). 
+ \bibitem{webjython} The Jython Project, \url{www.jython.org} From noreply at buildbot.pypy.org Mon May 5 17:02:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 17:02:10 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: ~\cite Message-ID: <20140505150210.C194A1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5242:1cbe6de6b5e2 Date: 2014-05-05 17:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/1cbe6de6b5e2/ Log: ~\cite diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -450,7 +450,7 @@ We believe that further work to reduce the overhead of STM is very worthwhile. In fact, considering some analogies that have been -drawn between garbage collection and transactional memory \cite{dan07}, +drawn between garbage collection and transactional memory~\cite{dan07}, we believe that it is worthwhile to focus the STM research more specifically on the context shown in this paper --- for use in implementations of high-level languages, rather than as a tool From noreply at buildbot.pypy.org Mon May 5 17:02:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 17:02:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: regen Message-ID: <20140505150252.D27E61C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5243:7a830b8d3eed Date: 2014-05-05 17:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/7a830b8d3eed/ Log: regen diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index d50af6d75293ebca7607edd49d1c57321cf7e97f..1b6cdc2799253a841eb294649452de2bd5a91af1 GIT binary patch [cut] From noreply at buildbot.pypy.org Mon May 5 17:08:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 May 2014 17:08:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Fix, maybe? 
Message-ID: <20140505150843.AF19F1D2371@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5244:cdc97c676401 Date: 2014-05-05 17:08 +0200 http://bitbucket.org/pypy/extradoc/changeset/cdc97c676401/ Log: Fix, maybe? diff --git a/talk/icooolps2014/position-paper.pdf b/talk/icooolps2014/position-paper.pdf index 1b6cdc2799253a841eb294649452de2bd5a91af1..473787d313f0bad0af373cb268d3e6b7cdb17d6d GIT binary patch [cut] diff --git a/talk/icooolps2014/position-paper.tex b/talk/icooolps2014/position-paper.tex --- a/talk/icooolps2014/position-paper.tex +++ b/talk/icooolps2014/position-paper.tex @@ -452,7 +452,7 @@ very worthwhile. In fact, considering some analogies that have been drawn between garbage collection and transactional memory~\cite{dan07}, we believe that it is worthwhile to focus the STM research more -specifically on the context shown in this paper --- for use in +specifically onto the context shown in this paper --- for use in implementations of high-level languages, rather than as a tool directly used by the programmer. 
From noreply at buildbot.pypy.org Mon May 5 17:35:58 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 17:35:58 +0200 (CEST) Subject: [pypy-commit] benchmarks default: this shows cpyext not working on stm currently Message-ID: <20140505153558.788911D23D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r259:11c8ab4dc4b6 Date: 2014-05-05 17:35 +0200 http://bitbucket.org/pypy/benchmarks/changeset/11c8ab4dc4b6/ Log: this shows cpyext not working on stm currently diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ -27,7 +27,7 @@ return result def save_img(image, file_name='out.png'): - import Image + from PIL import Image im = Image.new("RGB", (len(image[0]), len(image))) out = im.load() @@ -53,6 +53,7 @@ def run(threads=2, stripes=16): + global out_image threads = int(threads) stripes = int(stripes) assert stripes >= threads @@ -77,12 +78,12 @@ parallel_time = time.time() - parallel_time set_thread_pool(None) - merge_imgs(res) + out_image = merge_imgs(res) return parallel_time if __name__ == '__main__': - image = run(int(sys.argv[1])) - save_to_file(image) - # save_img(image) don't run on STM, allocates 4000GB of memory + image = run(int(sys.argv[1]), int(sys.argv[2])) + #save_to_file(out_image) + save_img(out_image) From noreply at buildbot.pypy.org Mon May 5 18:01:50 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 5 May 2014 18:01:50 +0200 (CEST) Subject: [pypy-commit] benchmarks default: make threadworms use a deque() because a linked list is better for STM Message-ID: <20140505160150.15C711D236E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r260:0ccaad335031 Date: 2014-05-05 18:01 +0200 http://bitbucket.org/pypy/benchmarks/changeset/0ccaad335031/ Log: make threadworms use a deque() because a linked list is better for STM diff --git 
a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -1,8 +1,15 @@ from Queue import Queue, Empty, Full -from threading import Thread, Condition, Lock, local +from threading import Thread, Condition, RLock, local import thread, atexit, sys, time -from atomic import atomic, getsegmentlimit, print_abort_info +try: + from atomic import atomic, getsegmentlimit, print_abort_info +except: + atomic = RLock() + def getsegmentlimit(): + return 1 + def print_abort_info(tm=0.0): + pass class TLQueue_concurrent(object): @@ -102,6 +109,7 @@ def shutdown(self): for w in self.workers: + self.input_queue.put((print_abort_info, (), {})) self.input_queue.put((sys.exit, (), {})) for w in self.workers: w.join() diff --git a/multithread/threadworms/threadworms.py b/multithread/threadworms/threadworms.py --- a/multithread/threadworms/threadworms.py +++ b/multithread/threadworms/threadworms.py @@ -130,9 +130,16 @@ NUM_WORMS = int(worms) NUM_STEPS = int(steps) // NUM_WORMS - GRID = [] + # using a deque instead of a list is kind of cheating + # since it is a linked list of blocks. This means + # that there are less conflicts. 
+ # So maybe remove this again when we support array-barriers in STM + import collections + list_to_use = collections.deque #list + + GRID = list_to_use() for x in range(CELLS_WIDE): - GRID.append([None] * CELLS_HIGH) + GRID.append(list_to_use([None] * CELLS_HIGH)) #GRID_LOCK = threading.Lock() # pun was not intended # Draw some walls on the grid From noreply at buildbot.pypy.org Mon May 5 21:03:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 5 May 2014 21:03:01 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix searchsorted return type for scalars Message-ID: <20140505190301.5ADB41C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71279:3581f7a906c9 Date: 2014-05-05 15:02 -0400 http://bitbucket.org/pypy/pypy/changeset/3581f7a906c9/ Log: test/fix searchsorted return type for scalars diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -738,6 +738,8 @@ ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) + if ret.is_scalar(): + return ret.get_scalar_value() return ret def descr_setasflat(self, space, w_v): diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -351,13 +351,21 @@ assert (x.argsort(kind='m') == np.arange(32)).all() def test_searchsort(self): - from numpy import arange + import numpy as np import sys - a = arange(1, 6) + a = np.arange(1, 6) ret = a.searchsorted(3) assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array(3)) + assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array([3])) + assert ret == 2 + assert isinstance(ret, np.ndarray) ret = a.searchsorted(3, side='right') assert ret 
== 3 + assert isinstance(ret, np.generic) ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: From noreply at buildbot.pypy.org Mon May 5 21:08:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 5 May 2014 21:08:10 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140505190810.8FEB61C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71280:00ddc2f3a097 Date: 2014-05-05 11:44 -0700 http://bitbucket.org/pypy/pypy/changeset/00ddc2f3a097/ Log: merge default diff too long, truncating to 2000 out of 3049 lines diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -87,6 +87,10 @@ * Support for corner cases on objects with __int__ and __float__ methods +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + .. _`HippyVM`: http://www.hippyvm.com New Platforms and Features diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -154,7 +154,7 @@ Improve optimization of small allocation-heavy loops in the JIT .. branch: reflex-support - + .. branch: asmosoinio/fixed-pip-installation-url-github-githu-1398674840188 .. branch: lexer_token_position_class @@ -165,3 +165,5 @@ .. branch: issue1430 Add a lock for unsafe calls to gethostbyname and gethostbyaddr +.. branch: fix-tpname +Changes hacks surrounding W_TypeObject.name to match CPython's tp_name diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 0f75ad4d14ce - +.. 
startrev: ec864bd08d50 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -121,10 +121,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, @@ -2883,6 +2882,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(mod)), ) +mod.typedef.heaptype = True def Module_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2930,6 +2930,7 @@ __new__=interp2app(get_AST_new(Module)), __init__=interp2app(Module_init), ) +Module.typedef.heaptype = True def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -2977,6 +2978,7 @@ __new__=interp2app(get_AST_new(Interactive)), __init__=interp2app(Interactive_init), ) +Interactive.typedef.heaptype = True def Expression_get_body(space, w_self): if w_self.w_dict is not None: @@ -3030,6 +3032,7 @@ __new__=interp2app(get_AST_new(Expression)), __init__=interp2app(Expression_init), ) +Expression.typedef.heaptype = True def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: @@ -3077,6 +3080,7 @@ __new__=interp2app(get_AST_new(Suite)), __init__=interp2app(Suite_init), ) +Suite.typedef.heaptype = True def stmt_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -3142,6 +3146,7 @@ col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), __new__=interp2app(get_AST_new(stmt)), ) +stmt.typedef.heaptype = True def FunctionDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3302,6 +3307,7 @@ __new__=interp2app(get_AST_new(FunctionDef)), 
__init__=interp2app(FunctionDef_init), ) +FunctionDef.typedef.heaptype = True def ClassDef_get_name(space, w_self): if w_self.w_dict is not None: @@ -3512,6 +3518,7 @@ __new__=interp2app(get_AST_new(ClassDef)), __init__=interp2app(ClassDef_init), ) +ClassDef.typedef.heaptype = True def Return_get_value(space, w_self): if w_self.w_dict is not None: @@ -3565,6 +3572,7 @@ __new__=interp2app(get_AST_new(Return)), __init__=interp2app(Return_init), ) +Return.typedef.heaptype = True def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3612,6 +3620,7 @@ __new__=interp2app(get_AST_new(Delete)), __init__=interp2app(Delete_init), ) +Delete.typedef.heaptype = True def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: @@ -3689,6 +3698,7 @@ __new__=interp2app(get_AST_new(Assign)), __init__=interp2app(Assign_init), ) +Assign.typedef.heaptype = True def AugAssign_get_target(space, w_self): if w_self.w_dict is not None: @@ -3802,6 +3812,7 @@ __new__=interp2app(get_AST_new(AugAssign)), __init__=interp2app(AugAssign_init), ) +AugAssign.typedef.heaptype = True def For_get_target(space, w_self): if w_self.w_dict is not None: @@ -3933,6 +3944,7 @@ __new__=interp2app(get_AST_new(For)), __init__=interp2app(For_init), ) +For.typedef.heaptype = True def While_get_test(space, w_self): if w_self.w_dict is not None: @@ -4034,6 +4046,7 @@ __new__=interp2app(get_AST_new(While)), __init__=interp2app(While_init), ) +While.typedef.heaptype = True def If_get_test(space, w_self): if w_self.w_dict is not None: @@ -4135,6 +4148,7 @@ __new__=interp2app(get_AST_new(If)), __init__=interp2app(If_init), ) +If.typedef.heaptype = True def With_get_context_expr(space, w_self): if w_self.w_dict is not None: @@ -4242,6 +4256,7 @@ __new__=interp2app(get_AST_new(With)), __init__=interp2app(With_init), ) +With.typedef.heaptype = True def Raise_get_exc(space, w_self): if w_self.w_dict is not None: @@ -4325,6 +4340,7 @@ __new__=interp2app(get_AST_new(Raise)), 
__init__=interp2app(Raise_init), ) +Raise.typedef.heaptype = True def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4420,6 +4436,7 @@ __new__=interp2app(get_AST_new(TryExcept)), __init__=interp2app(TryExcept_init), ) +TryExcept.typedef.heaptype = True def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: @@ -4491,6 +4508,7 @@ __new__=interp2app(get_AST_new(TryFinally)), __init__=interp2app(TryFinally_init), ) +TryFinally.typedef.heaptype = True def Assert_get_test(space, w_self): if w_self.w_dict is not None: @@ -4574,6 +4592,7 @@ __new__=interp2app(get_AST_new(Assert)), __init__=interp2app(Assert_init), ) +Assert.typedef.heaptype = True def Import_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4621,6 +4640,7 @@ __new__=interp2app(get_AST_new(Import)), __init__=interp2app(Import_init), ) +Import.typedef.heaptype = True def ImportFrom_get_module(space, w_self): if w_self.w_dict is not None: @@ -4731,6 +4751,7 @@ __new__=interp2app(get_AST_new(ImportFrom)), __init__=interp2app(ImportFrom_init), ) +ImportFrom.typedef.heaptype = True def Global_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4778,6 +4799,7 @@ __new__=interp2app(get_AST_new(Global)), __init__=interp2app(Global_init), ) +Global.typedef.heaptype = True def Nonlocal_get_names(space, w_self): if not w_self.initialization_state & 4: @@ -4825,6 +4847,7 @@ __new__=interp2app(get_AST_new(Nonlocal)), __init__=interp2app(Nonlocal_init), ) +Nonlocal.typedef.heaptype = True def Expr_get_value(space, w_self): if w_self.w_dict is not None: @@ -4878,6 +4901,7 @@ __new__=interp2app(get_AST_new(Expr)), __init__=interp2app(Expr_init), ) +Expr.typedef.heaptype = True def Pass_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Pass, w_self) @@ -4895,6 +4919,7 @@ __new__=interp2app(get_AST_new(Pass)), __init__=interp2app(Pass_init), ) +Pass.typedef.heaptype = True def Break_init(space, w_self, __args__): 
w_self = space.descr_self_interp_w(Break, w_self) @@ -4912,6 +4937,7 @@ __new__=interp2app(get_AST_new(Break)), __init__=interp2app(Break_init), ) +Break.typedef.heaptype = True def Continue_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Continue, w_self) @@ -4929,6 +4955,7 @@ __new__=interp2app(get_AST_new(Continue)), __init__=interp2app(Continue_init), ) +Continue.typedef.heaptype = True def expr_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -4994,6 +5021,7 @@ col_offset=typedef.GetSetProperty(expr_get_col_offset, expr_set_col_offset, expr_del_col_offset, cls=expr), __new__=interp2app(get_AST_new(expr)), ) +expr.typedef.heaptype = True def BoolOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5071,6 +5099,7 @@ __new__=interp2app(get_AST_new(BoolOp)), __init__=interp2app(BoolOp_init), ) +BoolOp.typedef.heaptype = True def BinOp_get_left(space, w_self): if w_self.w_dict is not None: @@ -5184,6 +5213,7 @@ __new__=interp2app(get_AST_new(BinOp)), __init__=interp2app(BinOp_init), ) +BinOp.typedef.heaptype = True def UnaryOp_get_op(space, w_self): if w_self.w_dict is not None: @@ -5267,6 +5297,7 @@ __new__=interp2app(get_AST_new(UnaryOp)), __init__=interp2app(UnaryOp_init), ) +UnaryOp.typedef.heaptype = True def Lambda_get_args(space, w_self): if w_self.w_dict is not None: @@ -5348,6 +5379,7 @@ __new__=interp2app(get_AST_new(Lambda)), __init__=interp2app(Lambda_init), ) +Lambda.typedef.heaptype = True def IfExp_get_test(space, w_self): if w_self.w_dict is not None: @@ -5461,6 +5493,7 @@ __new__=interp2app(get_AST_new(IfExp)), __init__=interp2app(IfExp_init), ) +IfExp.typedef.heaptype = True def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: @@ -5532,6 +5565,7 @@ __new__=interp2app(get_AST_new(Dict)), __init__=interp2app(Dict_init), ) +Dict.typedef.heaptype = True def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -5579,6 +5613,7 @@ __new__=interp2app(get_AST_new(Set)), 
__init__=interp2app(Set_init), ) +Set.typedef.heaptype = True def ListComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5656,6 +5691,7 @@ __new__=interp2app(get_AST_new(ListComp)), __init__=interp2app(ListComp_init), ) +ListComp.typedef.heaptype = True def SetComp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5733,6 +5769,7 @@ __new__=interp2app(get_AST_new(SetComp)), __init__=interp2app(SetComp_init), ) +SetComp.typedef.heaptype = True def DictComp_get_key(space, w_self): if w_self.w_dict is not None: @@ -5840,6 +5877,7 @@ __new__=interp2app(get_AST_new(DictComp)), __init__=interp2app(DictComp_init), ) +DictComp.typedef.heaptype = True def GeneratorExp_get_elt(space, w_self): if w_self.w_dict is not None: @@ -5917,6 +5955,7 @@ __new__=interp2app(get_AST_new(GeneratorExp)), __init__=interp2app(GeneratorExp_init), ) +GeneratorExp.typedef.heaptype = True def Yield_get_value(space, w_self): if w_self.w_dict is not None: @@ -5970,6 +6009,7 @@ __new__=interp2app(get_AST_new(Yield)), __init__=interp2app(Yield_init), ) +Yield.typedef.heaptype = True def Compare_get_left(space, w_self): if w_self.w_dict is not None: @@ -6071,6 +6111,7 @@ __new__=interp2app(get_AST_new(Compare)), __init__=interp2app(Compare_init), ) +Compare.typedef.heaptype = True def Call_get_func(space, w_self): if w_self.w_dict is not None: @@ -6232,6 +6273,7 @@ __new__=interp2app(get_AST_new(Call)), __init__=interp2app(Call_init), ) +Call.typedef.heaptype = True def Num_get_n(space, w_self): if w_self.w_dict is not None: @@ -6284,6 +6326,7 @@ __new__=interp2app(get_AST_new(Num)), __init__=interp2app(Num_init), ) +Num.typedef.heaptype = True def Str_get_s(space, w_self): if w_self.w_dict is not None: @@ -6336,6 +6379,7 @@ __new__=interp2app(get_AST_new(Str)), __init__=interp2app(Str_init), ) +Str.typedef.heaptype = True def Bytes_get_s(space, w_self): if w_self.w_dict is not None: @@ -6388,6 +6432,7 @@ __new__=interp2app(get_AST_new(Bytes)), __init__=interp2app(Bytes_init), ) 
+Bytes.typedef.heaptype = True def Ellipsis_init(space, w_self, __args__): w_self = space.descr_self_interp_w(Ellipsis, w_self) @@ -6405,6 +6450,7 @@ __new__=interp2app(get_AST_new(Ellipsis)), __init__=interp2app(Ellipsis_init), ) +Ellipsis.typedef.heaptype = True def Attribute_get_value(space, w_self): if w_self.w_dict is not None: @@ -6519,6 +6565,7 @@ __new__=interp2app(get_AST_new(Attribute)), __init__=interp2app(Attribute_init), ) +Attribute.typedef.heaptype = True def Subscript_get_value(space, w_self): if w_self.w_dict is not None: @@ -6632,6 +6679,7 @@ __new__=interp2app(get_AST_new(Subscript)), __init__=interp2app(Subscript_init), ) +Subscript.typedef.heaptype = True def Starred_get_value(space, w_self): if w_self.w_dict is not None: @@ -6715,6 +6763,7 @@ __new__=interp2app(get_AST_new(Starred)), __init__=interp2app(Starred_init), ) +Starred.typedef.heaptype = True def Name_get_id(space, w_self): if w_self.w_dict is not None: @@ -6799,6 +6848,7 @@ __new__=interp2app(get_AST_new(Name)), __init__=interp2app(Name_init), ) +Name.typedef.heaptype = True def List_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6876,6 +6926,7 @@ __new__=interp2app(get_AST_new(List)), __init__=interp2app(List_init), ) +List.typedef.heaptype = True def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: @@ -6953,6 +7004,7 @@ __new__=interp2app(get_AST_new(Tuple)), __init__=interp2app(Tuple_init), ) +Tuple.typedef.heaptype = True def Const_get_value(space, w_self): if w_self.w_dict is not None: @@ -7005,6 +7057,7 @@ __new__=interp2app(get_AST_new(Const)), __init__=interp2app(Const_init), ) +Const.typedef.heaptype = True expr_context.typedef = typedef.TypeDef("expr_context", AST.typedef, @@ -7012,6 +7065,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(expr_context)), ) +expr_context.typedef.heaptype = True _Load.typedef = typedef.TypeDef("Load", expr_context.typedef, @@ -7019,6 +7073,7 @@ _fields=_FieldsWrapper([]), 
__new__=interp2app(get_AST_new(_Load)), ) +_Load.typedef.heaptype = True _Store.typedef = typedef.TypeDef("Store", expr_context.typedef, @@ -7026,6 +7081,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Store)), ) +_Store.typedef.heaptype = True _Del.typedef = typedef.TypeDef("Del", expr_context.typedef, @@ -7033,6 +7089,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Del)), ) +_Del.typedef.heaptype = True _AugLoad.typedef = typedef.TypeDef("AugLoad", expr_context.typedef, @@ -7040,6 +7097,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugLoad)), ) +_AugLoad.typedef.heaptype = True _AugStore.typedef = typedef.TypeDef("AugStore", expr_context.typedef, @@ -7047,6 +7105,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_AugStore)), ) +_AugStore.typedef.heaptype = True _Param.typedef = typedef.TypeDef("Param", expr_context.typedef, @@ -7054,6 +7113,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Param)), ) +_Param.typedef.heaptype = True slice.typedef = typedef.TypeDef("slice", AST.typedef, @@ -7061,6 +7121,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(slice)), ) +slice.typedef.heaptype = True def Slice_get_lower(space, w_self): if w_self.w_dict is not None: @@ -7174,6 +7235,7 @@ __new__=interp2app(get_AST_new(Slice)), __init__=interp2app(Slice_init), ) +Slice.typedef.heaptype = True def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: @@ -7221,6 +7283,7 @@ __new__=interp2app(get_AST_new(ExtSlice)), __init__=interp2app(ExtSlice_init), ) +ExtSlice.typedef.heaptype = True def Index_get_value(space, w_self): if w_self.w_dict is not None: @@ -7274,6 +7337,7 @@ __new__=interp2app(get_AST_new(Index)), __init__=interp2app(Index_init), ) +Index.typedef.heaptype = True boolop.typedef = typedef.TypeDef("boolop", AST.typedef, @@ -7281,6 +7345,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(boolop)), ) +boolop.typedef.heaptype = True 
_And.typedef = typedef.TypeDef("And", boolop.typedef, @@ -7288,6 +7353,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_And)), ) +_And.typedef.heaptype = True _Or.typedef = typedef.TypeDef("Or", boolop.typedef, @@ -7295,6 +7361,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Or)), ) +_Or.typedef.heaptype = True operator.typedef = typedef.TypeDef("operator", AST.typedef, @@ -7302,6 +7369,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(operator)), ) +operator.typedef.heaptype = True _Add.typedef = typedef.TypeDef("Add", operator.typedef, @@ -7309,6 +7377,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Add)), ) +_Add.typedef.heaptype = True _Sub.typedef = typedef.TypeDef("Sub", operator.typedef, @@ -7316,6 +7385,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Sub)), ) +_Sub.typedef.heaptype = True _Mult.typedef = typedef.TypeDef("Mult", operator.typedef, @@ -7323,6 +7393,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mult)), ) +_Mult.typedef.heaptype = True _Div.typedef = typedef.TypeDef("Div", operator.typedef, @@ -7330,6 +7401,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Div)), ) +_Div.typedef.heaptype = True _Mod.typedef = typedef.TypeDef("Mod", operator.typedef, @@ -7337,6 +7409,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Mod)), ) +_Mod.typedef.heaptype = True _Pow.typedef = typedef.TypeDef("Pow", operator.typedef, @@ -7344,6 +7417,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Pow)), ) +_Pow.typedef.heaptype = True _LShift.typedef = typedef.TypeDef("LShift", operator.typedef, @@ -7351,6 +7425,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LShift)), ) +_LShift.typedef.heaptype = True _RShift.typedef = typedef.TypeDef("RShift", operator.typedef, @@ -7358,6 +7433,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_RShift)), ) +_RShift.typedef.heaptype = True _BitOr.typedef = 
typedef.TypeDef("BitOr", operator.typedef, @@ -7365,6 +7441,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitOr)), ) +_BitOr.typedef.heaptype = True _BitXor.typedef = typedef.TypeDef("BitXor", operator.typedef, @@ -7372,6 +7449,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitXor)), ) +_BitXor.typedef.heaptype = True _BitAnd.typedef = typedef.TypeDef("BitAnd", operator.typedef, @@ -7379,6 +7457,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_BitAnd)), ) +_BitAnd.typedef.heaptype = True _FloorDiv.typedef = typedef.TypeDef("FloorDiv", operator.typedef, @@ -7386,6 +7465,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_FloorDiv)), ) +_FloorDiv.typedef.heaptype = True unaryop.typedef = typedef.TypeDef("unaryop", AST.typedef, @@ -7393,6 +7473,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(unaryop)), ) +unaryop.typedef.heaptype = True _Invert.typedef = typedef.TypeDef("Invert", unaryop.typedef, @@ -7400,6 +7481,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Invert)), ) +_Invert.typedef.heaptype = True _Not.typedef = typedef.TypeDef("Not", unaryop.typedef, @@ -7407,6 +7489,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Not)), ) +_Not.typedef.heaptype = True _UAdd.typedef = typedef.TypeDef("UAdd", unaryop.typedef, @@ -7414,6 +7497,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_UAdd)), ) +_UAdd.typedef.heaptype = True _USub.typedef = typedef.TypeDef("USub", unaryop.typedef, @@ -7421,6 +7505,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_USub)), ) +_USub.typedef.heaptype = True cmpop.typedef = typedef.TypeDef("cmpop", AST.typedef, @@ -7428,6 +7513,7 @@ _attributes=_FieldsWrapper([]), __new__=interp2app(get_AST_new(cmpop)), ) +cmpop.typedef.heaptype = True _Eq.typedef = typedef.TypeDef("Eq", cmpop.typedef, @@ -7435,6 +7521,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Eq)), ) +_Eq.typedef.heaptype 
= True _NotEq.typedef = typedef.TypeDef("NotEq", cmpop.typedef, @@ -7442,6 +7529,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotEq)), ) +_NotEq.typedef.heaptype = True _Lt.typedef = typedef.TypeDef("Lt", cmpop.typedef, @@ -7449,6 +7537,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Lt)), ) +_Lt.typedef.heaptype = True _LtE.typedef = typedef.TypeDef("LtE", cmpop.typedef, @@ -7456,6 +7545,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_LtE)), ) +_LtE.typedef.heaptype = True _Gt.typedef = typedef.TypeDef("Gt", cmpop.typedef, @@ -7463,6 +7553,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Gt)), ) +_Gt.typedef.heaptype = True _GtE.typedef = typedef.TypeDef("GtE", cmpop.typedef, @@ -7470,6 +7561,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_GtE)), ) +_GtE.typedef.heaptype = True _Is.typedef = typedef.TypeDef("Is", cmpop.typedef, @@ -7477,6 +7569,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_Is)), ) +_Is.typedef.heaptype = True _IsNot.typedef = typedef.TypeDef("IsNot", cmpop.typedef, @@ -7484,6 +7577,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_IsNot)), ) +_IsNot.typedef.heaptype = True _In.typedef = typedef.TypeDef("In", cmpop.typedef, @@ -7491,6 +7585,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_In)), ) +_In.typedef.heaptype = True _NotIn.typedef = typedef.TypeDef("NotIn", cmpop.typedef, @@ -7498,6 +7593,7 @@ _fields=_FieldsWrapper([]), __new__=interp2app(get_AST_new(_NotIn)), ) +_NotIn.typedef.heaptype = True def comprehension_get_target(space, w_self): if w_self.w_dict is not None: @@ -7605,6 +7701,7 @@ __new__=interp2app(get_AST_new(comprehension)), __init__=interp2app(comprehension_init), ) +comprehension.typedef.heaptype = True def excepthandler_get_lineno(space, w_self): if w_self.w_dict is not None: @@ -7670,6 +7767,7 @@ col_offset=typedef.GetSetProperty(excepthandler_get_col_offset, excepthandler_set_col_offset, 
excepthandler_del_col_offset, cls=excepthandler), __new__=interp2app(get_AST_new(excepthandler)), ) +excepthandler.typedef.heaptype = True def ExceptHandler_get_type(space, w_self): if w_self.w_dict is not None: @@ -7781,6 +7879,7 @@ __new__=interp2app(get_AST_new(ExceptHandler)), __init__=interp2app(ExceptHandler_init), ) +ExceptHandler.typedef.heaptype = True def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: @@ -8028,6 +8127,7 @@ __new__=interp2app(get_AST_new(arguments)), __init__=interp2app(arguments_init), ) +arguments.typedef.heaptype = True def arg_get_arg(space, w_self): if w_self.w_dict is not None: @@ -8112,6 +8212,7 @@ __new__=interp2app(get_AST_new(arg)), __init__=interp2app(arg_init), ) +arg.typedef.heaptype = True def keyword_get_arg(space, w_self): if w_self.w_dict is not None: @@ -8196,6 +8297,7 @@ __new__=interp2app(get_AST_new(keyword)), __init__=interp2app(keyword_init), ) +keyword.typedef.heaptype = True def alias_get_name(space, w_self): if w_self.w_dict is not None: @@ -8284,4 +8386,5 @@ __new__=interp2app(get_AST_new(alias)), __init__=interp2app(alias_init), ) - +alias.typedef.heaptype = True + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -359,6 +359,7 @@ if needs_init: self.emit("__init__=interp2app(%s_init)," % (name,), 1) self.emit(")") + self.emit("%s.typedef.heaptype = True" % name) self.emit("") def make_init(self, name, fields): @@ -680,10 +681,9 @@ for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("AST", +AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __module__='_ast', __reduce__=interp2app(AST.reduce_w), __setstate__=interp2app(AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, diff 
--git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -392,11 +392,7 @@ self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - from pypy.interpreter.pycode import cpython_magic, default_magic - self.our_magic = default_magic - self.host_magic = cpython_magic # can be overridden to a subclass - self.initialize() def startup(self): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -416,7 +416,7 @@ elif fmt == 'R': result = space.unicode_w(space.repr(value)) elif fmt == 'T': - result = space.type(value).get_module_type_name() + result = space.type(value).name.decode('utf-8') elif fmt == 'N': result = value.getname(space) elif fmt == '8': @@ -461,7 +461,7 @@ %8 - The result of arg.decode('utf-8') %N - The result of w_arg.getname(space) %R - The result of space.unicode_w(space.repr(w_arg)) - %T - The result of space.type(w_arg).get_module_type_name() + %T - The result of space.type(w_arg).name """ if not len(args): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -1101,7 +1101,9 @@ return x+y ''') """ + prefix = "" if not isinstance(source, str): + flags = source.__code__.co_flags source = py.std.inspect.getsource(source).lstrip() while source.startswith(('@py.test.mark.', '@pytest.mark.')): # these decorators are known to return the same function @@ -1110,12 +1112,21 @@ source = source[source.find('\n') + 1:].lstrip() assert source.startswith("def "), "can only transform functions" source = source[4:] + import __future__ + if flags & __future__.CO_FUTURE_DIVISION: + prefix += "from __future__ import division\n" + if flags & __future__.CO_FUTURE_ABSOLUTE_IMPORT: + prefix += "from __future__ import absolute_import\n" + if flags & __future__.CO_FUTURE_PRINT_FUNCTION: + 
prefix += "from __future__ import print_function\n" + if flags & __future__.CO_FUTURE_UNICODE_LITERALS: + prefix += "from __future__ import unicode_literals\n" p = source.find('(') assert p >= 0 funcname = source[:p].strip() source = source[p:] assert source.strip() - funcsource = "def %s%s\n" % (funcname, source) + funcsource = prefix + "def %s%s\n" % (funcname, source) #for debugging of wrong source code: py.std.parser.suite(funcsource) a = applevel(funcsource, filename=filename) return a.interphook(funcname) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -1,12 +1,13 @@ - # -*- coding: utf-8 -*- +from __future__ import division, print_function # for test_app2interp_future from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature import py import sys + class FakeFunc(object): def __init__(self, space, name): self.space = space @@ -14,6 +15,7 @@ self.defs_w = [] self.w_kw_defs = None + class TestBuiltinCode: def test_signature(self): def c(space, w_x, w_y, hello_w): @@ -90,8 +92,8 @@ w_result = code.funcrun(FakeFunc(self.space, "c"), args) assert self.space.eq_w(w_result, w(1020)) + class TestGateway: - def test_app2interp(self): w = self.space.wrap def app_g3(a, b): @@ -118,6 +120,14 @@ args = gateway.Arguments(self.space, [w(6)], ['hello', 'world'], [w(7), w(8)]) assert self.space.int_w(gg(self.space, w(3), args)) == 213 + def test_app2interp_future(self): + w = self.space.wrap + def app_g3(a, b): + print(end='') + return a / b + g3 = gateway.app2interp_temp(app_g3) + assert self.space.eq_w(g3(self.space, w(1), w(4),), w(0.25)) + def test_interp2app(self): space = self.space w = space.wrap @@ -628,7 +638,7 @@ w_app_f = self.space.wrap(app_f) assert isinstance(w_app_f.code, gateway.BuiltinCode2) - + called = [] fastcall_2 = 
w_app_f.code.fastcall_2 def witness_fastcall_2(space, w_func, w_a, w_b): @@ -768,7 +778,6 @@ class TestPassThroughArguments: - def test_pass_trough_arguments0(self): space = self.space @@ -866,7 +875,6 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): def __init__(self, **kw): @@ -905,4 +913,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -22,6 +22,7 @@ else: bases = [__base] self.bases = bases + self.heaptype = False self.hasdict = '__dict__' in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.get('__doc__', None) diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -21,9 +21,9 @@ assert isinstance(ast.__version__, str) def test_flags(self): - skip("broken") from copy_reg import _HEAPTYPE - assert self.ast.Module.__flags__ & _HEAPTYPE + assert self.ast.AST.__flags__ & _HEAPTYPE == 0 + assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE def test_build_ast(self): ast = self.ast @@ -239,19 +239,19 @@ x = ast.Num() assert x._fields == ('n',) exc = raises(AttributeError, getattr, x, 'n') - assert "Num' object has no attribute 'n'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'n'" x = ast.Num(42) assert x.n == 42 exc = raises(AttributeError, getattr, x, 'lineno') - assert "Num' object has no attribute 'lineno'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'lineno'" y = ast.Num() x.lineno = y assert x.lineno == y exc = raises(AttributeError, getattr, x, 'foobar') - assert "Num' object has no attribute 'foobar'" in exc.value.args[0] + assert str(exc.value) == "'Num' object has no attribute 'foobar'" x = ast.Num(lineno=2) assert x.lineno == 2 @@ -423,7 +423,7 @@ def 
test_issue1673_Num_fullinit(self): import ast - import copy + import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) assert num_node.n == num_node2.n diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -67,8 +67,7 @@ MiniBuffer.typedef = TypeDef( - "buffer", - __module__ = "_cffi_backend", + "_cffi_backend.buffer", __len__ = interp2app(MiniBuffer.descr_len), __getitem__ = interp2app(MiniBuffer.descr_getitem), __setitem__ = interp2app(MiniBuffer.descr_setitem), diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -441,7 +441,7 @@ W_CData.typedef = TypeDef( - 'CData', + '_cffi_backend.CData', __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -130,8 +130,7 @@ return self.ctitem.convert_to_object(result) W_CDataIter.typedef = TypeDef( - 'CDataIter', - __module__ = '_cffi_backend', + '_cffi_backend.CDataIter', __iter__ = interp2app(W_CDataIter.iter_w), __next__ = interp2app(W_CDataIter.next_w), ) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -210,8 +210,7 @@ W_CType.typedef = TypeDef( - 'CTypeDescr', - __module__ = '_cffi_backend', + '_cffi_backend.CTypeDescr', __repr__ = interp2app(W_CType.repr), __weakref__ = make_weakref_descr(W_CType), kind = GetSetProperty(W_CType.fget_kind, doc="kind"), diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- 
a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -307,8 +307,7 @@ W_CField.typedef = TypeDef( - 'CField', - __module__ = '_cffi_backend', + '_cffi_backend.CField', type = interp_attrproperty('ctype', W_CField), offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -85,8 +85,7 @@ W_Library.typedef = TypeDef( - 'Library', - __module__ = '_cffi_backend', + '_cffi_backend.Library', __repr__ = interp2app(W_Library.repr), load_function = interp2app(W_Library.load_function), read_variable = interp2app(W_Library.read_variable), diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -463,11 +463,10 @@ W_Deque.__init__(space.interp_w(W_Deque, w_self), space) return w_self -W_Deque.typedef = TypeDef("deque", +W_Deque.typedef = TypeDef("collections.deque", __doc__ = """deque(iterable[, maxlen]) --> deque object Build an ordered collection accessible from endpoints only.""", - __module__ = '_collections', __new__ = interp2app(descr__new__), __init__ = interp2app(W_Deque.init), append = interp2app(W_Deque.append), diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -4,6 +4,8 @@ def test_basics(self): from _collections import deque + assert deque.__module__ == 'collections' + d = deque(range(-5125, -5000)) d.__init__(range(200)) for i in range(200, 400): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- 
a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -136,8 +136,7 @@ W_Continulet.typedef = TypeDef( - 'continulet', - __module__ = '_continuation', + '_continuation.continulet', __new__ = interp2app(W_Continulet___new__), __init__ = interp2app(W_Continulet.descr_init), switch = interp2app(W_Continulet.descr_switch), diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -154,8 +154,7 @@ W_Dialect.typedef = TypeDef( - 'Dialect', - __module__ = '_csv', + '_csv.Dialect', __new__ = interp2app(W_Dialect___new__), delimiter = interp_attrproperty('delimiter', W_Dialect), diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -238,8 +238,7 @@ return W_Reader(space, dialect, w_iter) W_Reader.typedef = TypeDef( - 'reader', - __module__ = '_csv', + '_csv.reader', dialect = interp_attrproperty_w('dialect', W_Reader), line_num = interp_attrproperty('line_num', W_Reader), __iter__ = interp2app(W_Reader.iter_w), diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -160,8 +160,7 @@ return W_Writer(space, dialect, w_fileobj) W_Writer.typedef = TypeDef( - 'writer', - __module__ = '_csv', + '_csv.writer', dialect = interp_attrproperty_w('dialect', W_Writer), writerow = interp2app(W_Writer.writerow), writerows = interp2app(W_Writer.writerows), diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -217,17 +217,16 @@ return space.call_method(self.w_raw, "isatty") def repr_w(self, space): - typename = space.type(self).getname(space) - module = space.unicode_w(space.type(self).get_module()) + 
typename = space.type(self).name.decode('utf-8') try: w_name = space.getattr(self, space.wrap("name")) except OperationError, e: if not e.match(space, space.w_AttributeError): raise - return space.wrap(u"<%s.%s>" % (module, typename,)) + return space.wrap(u"<%s>" % (typename,)) else: name_repr = space.unicode_w(space.repr(w_name)) - return space.wrap(u"<%s.%s name=%s>" % (module, typename, name_repr)) + return space.wrap(u"<%s name=%s>" % (typename, name_repr)) # ______________________________________________ @@ -854,11 +853,10 @@ self.state = STATE_OK W_BufferedReader.typedef = TypeDef( - 'BufferedReader', W_BufferedIOBase.typedef, + '_io.BufferedReader', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedReader), __init__ = interp2app(W_BufferedReader.descr_init), __getstate__ = interp2app(W_BufferedReader.getstate_w), - __module__ = "_io", read = interp2app(W_BufferedReader.read_w), peek = interp2app(W_BufferedReader.peek_w), @@ -903,11 +901,10 @@ self.state = STATE_OK W_BufferedWriter.typedef = TypeDef( - 'BufferedWriter', W_BufferedIOBase.typedef, + '_io.BufferedWriter', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedWriter), __init__ = interp2app(W_BufferedWriter.descr_init), __getstate__ = interp2app(W_BufferedWriter.getstate_w), - __module__ = "_io", write = interp2app(W_BufferedWriter.write_w), flush = interp2app(W_BufferedWriter.flush_w), @@ -1028,11 +1025,10 @@ self.state = STATE_OK W_BufferedRandom.typedef = TypeDef( - 'BufferedRandom', W_BufferedIOBase.typedef, + '_io.BufferedRandom', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRandom), __init__ = interp2app(W_BufferedRandom.descr_init), __getstate__ = interp2app(W_BufferedRandom.getstate_w), - __module__ = "_io", read = interp2app(W_BufferedRandom.read_w), peek = interp2app(W_BufferedRandom.peek_w), diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ 
b/pypy/module/_io/interp_stringio.py @@ -264,8 +264,7 @@ W_StringIO.typedef = TypeDef( - 'StringIO', W_TextIOBase.typedef, - __module__ = "_io", + '_io.StringIO', W_TextIOBase.typedef, __new__ = generic_new_descr(W_StringIO), __init__ = interp2app(W_StringIO.descr_init), __getstate__ = interp2app(W_StringIO.descr_getstate), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1049,13 +1049,12 @@ self.chunk_size = size W_TextIOWrapper.typedef = TypeDef( - 'TextIOWrapper', W_TextIOBase.typedef, + '_io.TextIOWrapper', W_TextIOBase.typedef, __new__ = generic_new_descr(W_TextIOWrapper), __init__ = interp2app(W_TextIOWrapper.descr_init), __repr__ = interp2app(W_TextIOWrapper.descr_repr), __next__ = interp2app(W_TextIOWrapper.next_w), __getstate__ = interp2app(W_TextIOWrapper.getstate_w), - __module__ = "_io", read = interp2app(W_TextIOWrapper.read_w), readline = interp2app(W_TextIOWrapper.readline_w), diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.get_module_type_name() + class_name = w_realclass.name else: name = '?' 
if class_name is None: diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -75,7 +75,6 @@ MultibyteIncrementalDecoder.typedef = TypeDef( 'MultibyteIncrementalDecoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbidecoder_new), decode = interp2app(MultibyteIncrementalDecoder.decode_w), reset = interp2app(MultibyteIncrementalDecoder.reset_w), @@ -124,7 +123,6 @@ MultibyteIncrementalEncoder.typedef = TypeDef( 'MultibyteIncrementalEncoder', - __module__ = '_multibytecodec', __new__ = interp2app(mbiencoder_new), encode = interp2app(MultibyteIncrementalEncoder.encode_w), reset = interp2app(MultibyteIncrementalEncoder.reset_w), diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -46,7 +46,6 @@ MultibyteCodec.typedef = TypeDef( 'MultibyteCodec', - __module__ = '_multibytecodec', decode = interp2app(MultibyteCodec.decode), encode = interp2app(MultibyteCodec.encode), ) diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -354,9 +354,8 @@ return bool(r) W_FileConnection.typedef = TypeDef( - 'Connection', W_BaseConnection.typedef, + '_multiprocessing.Connection', W_BaseConnection.typedef, __new__ = interp2app(W_FileConnection.descr_new_file.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_FileConnection.fileno), ) @@ -535,8 +534,7 @@ if sys.platform == 'win32': W_PipeConnection.typedef = TypeDef( - 'PipeConnection', W_BaseConnection.typedef, + '_multiprocessing.PipeConnection', W_BaseConnection.typedef, __new__ 
= interp2app(W_PipeConnection.descr_new_pipe.im_func), - __module__ = '_multiprocessing', fileno = interp2app(W_PipeConnection.fileno), ) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -612,8 +612,7 @@ method = getattr(W_RSocket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("socket", - __module__ = "_socket", +W_RSocket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -530,9 +530,8 @@ return space.wrap(s) W_ArrayBase.typedef = TypeDef( - 'array', + 'array.array', __new__ = interp2app(w_array), - __module__ = 'array', __len__ = interp2app(W_ArrayBase.descr_len), __eq__ = interp2app(W_ArrayBase.descr_eq), diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -1,8 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype # shared ll definitions -_C_OPAQUE_PTR = rffi.LONG -_C_OPAQUE_NULL = lltype.nullptr(rffi.LONGP.TO)# ALT: _C_OPAQUE_PTR.TO +_C_OPAQUE_PTR = rffi.ULONG +_C_OPAQUE_NULL = lltype.nullptr(rffi.ULONGP.TO)# ALT: _C_OPAQUE_PTR.TO C_SCOPE = _C_OPAQUE_PTR C_NULL_SCOPE = rffi.cast(C_SCOPE, _C_OPAQUE_NULL) diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -249,7 +249,7 @@ def activate_branch(space, w_branch): w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.int_w(space.call_method(w_branches, "GetEntriesFast"))): + for i in range(space.r_longlong_w(space.call_method(w_branches, 
"GetEntriesFast"))): w_b = space.call_method(w_branches, "At", space.wrap(i)) activate_branch(space, w_b) space.call_method(w_branch, "SetStatus", space.wrap(1)) @@ -292,7 +292,7 @@ activate_branch(space, w_branch) # figure out from where we're reading - entry = space.int_w(space.call_method(w_self, "GetReadEntry")) + entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) if entry == -1: entry = 0 @@ -341,7 +341,7 @@ self.w_tree = w_tree self.current = 0 - self.maxentry = space.int_w(space.call_method(w_tree, "GetEntriesFast")) + self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) space = self.space = tree.space # holds the class cache in State space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -21,10 +21,11 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, l = 0, s = '', vp = rffi.cast(rffi.VOIDP, 0) ): - self._long = l + def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + self._handle = h + self._long = l self._string = s - self._voidp = vp + self._voidp = vp # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -57,7 +58,7 @@ if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): - misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._long), argtype.size) + misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) elif obj._voidp != rffi.cast(rffi.VOIDP, 0): data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp @@ -91,7 +92,7 @@ # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'long') + c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') c_scope = c_opaque_ptr c_type = c_scope @@ -116,6 +117,8 @@ c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') + c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') + self.capi_call_ifaces = { # name to opaque C++ scope representation 'num_scopes' : ([c_scope], c_int), @@ -152,7 +155,7 @@ 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify # handling of function argument buffer - 'allocate_function_args' : ([c_size_t], c_voidp), + 'allocate_function_args' : ([c_int], c_voidp), 'deallocate_function_args' : ([c_voidp], c_void), 'function_arg_sizeof' : ([], c_size_t), 'function_arg_typeoffset' : ([], c_size_t), @@ -169,7 +172,7 @@ 'base_name' : ([c_type, c_int], c_ccharp), 'is_subtype' : ([c_type, c_type], c_int), - 'base_offset' : ([c_type, c_type, c_object, c_int], c_long), + 'base_offset' : ([c_type, c_type, c_object, c_int], c_ptrdiff_t), # method/function reflection information 'num_methods' : ([c_scope], c_int), @@ -199,7 +202,7 @@ 'num_datamembers' : ([c_scope], c_int), 'datamember_name' : ([c_scope, c_int], c_ccharp), 
'datamember_type' : ([c_scope, c_int], c_ccharp), - 'datamember_offset' : ([c_scope, c_int], c_size_t), + 'datamember_offset' : ([c_scope, c_int], c_ptrdiff_t), 'datamember_index' : ([c_scope, c_ccharp], c_int), @@ -259,10 +262,13 @@ return c_call.ctype.rcall(c_call._cdata, args) def _cdata_to_cobject(space, w_cdata): - return rffi.cast(C_OBJECT, space.int_w(w_cdata)) + return rffi.cast(C_OBJECT, space.uint_w(w_cdata)) def _cdata_to_size_t(space, w_cdata): - return rffi.cast(rffi.SIZE_T, space.int_w(w_cdata)) + return rffi.cast(rffi.SIZE_T, space.uint_w(w_cdata)) + +def _cdata_to_ptrdiff_t(space, w_cdata): + return rffi.cast(rffi.LONG, space.int_w(w_cdata)) def _cdata_to_ptr(space, w_cdata): # TODO: this is both a hack and dreadfully slow return rffi.cast(rffi.VOIDP, @@ -273,74 +279,74 @@ # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(l=cppscope.handle), _Arg(l=iscope)] + args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.int_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) def c_get_template(space, name): - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(l=cppclass.handle), _Arg(l=cppobj)] - return rffi.cast(C_TYPE, space.int_w(call_capi(space, 'actual_class', 
args))) + args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(l=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(l=cppclass.handle), _Arg(l=cppobject)]) + call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.UCHAR, space.c_int_w(call_capi(space, 'call_b', args))) + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), 
_Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), 
_Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return call_capi(space, 'call_s', args) def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(l=cppmethod), _Arg(l=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(l=cppclass.handle)] + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return rffi.cast(C_METHPTRGETTER_PTR, _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) @@ -358,47 +364,47 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(l=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) def c_is_enum(space, name): return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) # type/class reflection information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(l=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 
'has_complex_hierarchy', [_Arg(l=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(l=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(l=cppclass.handle), _Arg(l=base_index)] + args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(l=derived.handle), _Arg(l=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(l=derived_h), _Arg(l=base_h), _Arg(l=address), _Arg(l=direction)] - return _cdata_to_size_t(space, call_capi(space, 'base_offset', args)) + args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: - return rffi.cast(rffi.SIZE_T, 0) + return rffi.cast(rffi.LONG, 0) return _c_base_offset(space, derived.handle, base.handle, address, direction) def c_base_offset1(space, derived_h, base, address, direction): return _c_base_offset(space, derived_h, base.handle, address, direction) # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(l=cppscope.handle)] + args = [_Arg(h=cppscope.handle)] return space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(l=cppscope.handle), _Arg(l=imethod)] + args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] return 
space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -414,36 +420,36 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(l=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def 
c_method_is_template(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] + args = [_Arg(h=cppscope.handle), _Arg(l=index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(l=cppscope.handle) + arg1 = _Arg(h=cppscope.handle) arg2 = _Arg(l=index) args = [c_resolve_name(space, charp2str_free(space, call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) @@ -451,45 +457,45 @@ return args def c_get_method(space, cppscope, index): - args = [_Arg(l=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHOD, space.int_w(call_capi(space, 'get_method', args))) + args = [_Arg(h=cppscope.handle), _Arg(l=index)] + return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(l=nss.handle), _Arg(l=lc.handle), _Arg(l=rc.handle), _Arg(s=op)] + args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = [_Arg(l=cppclass.handle), _Arg(l=index)] + args = [_Arg(h=cppclass.handle), _Arg(l=index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information 
----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(l=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] - return _cdata_to_size_t(space, call_capi(space, 'datamember_offset', args)) + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(l=cppscope.handle), _Arg(s=name)] + args = [_Arg(h=cppscope.handle), _Arg(s=name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(l=cppscope.handle), _Arg(l=datamember_index)] + args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers --------------------------------------------------------------- @@ -509,7 +515,7 @@ def c_charp2stdstring(space, svalue): 
return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(l=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) # loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) def register_pythonizations(space): diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,7 @@ try: # TODO: accept a 'capsule' rather than naked int # (do accept int(0), though) - obj = rffi.cast(rffi.VOIDP, space.int_w(w_obj)) + obj = rffi.cast(rffi.VOIDP, space.uint_w(w_obj)) except Exception: obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -102,7 +102,7 @@ _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] libffitype = jit_libffi.types.slong - c_type = rffi.LONG + c_type = rffi.LONG c_ptrtype = rffi.LONGP def _unwrap_object(self, space, w_obj): diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -7,10 +7,10 @@ extern "C" { #endif // ifdef __cplusplus - typedef long cppyy_scope_t; + typedef unsigned long cppyy_scope_t; typedef cppyy_scope_t cppyy_type_t; - typedef long cppyy_object_t; - typedef long cppyy_method_t; + typedef unsigned long cppyy_object_t; + typedef unsigned long cppyy_method_t; typedef long cppyy_index_t; typedef void* (*cppyy_methptrgetter_t)(cppyy_object_t); @@ -48,7 +48,7 @@ cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_scope_t scope, cppyy_index_t idx); /* handling of function argument buffer ----------------------------------- */ - void* 
cppyy_allocate_function_args(size_t nargs); + void* cppyy_allocate_function_args(int nargs); void cppyy_deallocate_function_args(void* args); size_t cppyy_function_arg_sizeof(); size_t cppyy_function_arg_typeoffset(); @@ -66,7 +66,7 @@ int cppyy_is_subtype(cppyy_type_t derived, cppyy_type_t base); /* calculate offsets between declared and actual type, up-cast: direction > 0; down-cast: direction < 0 */ - size_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); + ptrdiff_t cppyy_base_offset(cppyy_type_t derived, cppyy_type_t base, cppyy_object_t address, int direction); /* method/function reflection information --------------------------------- */ int cppyy_num_methods(cppyy_scope_t scope); @@ -97,7 +97,7 @@ int cppyy_num_datamembers(cppyy_scope_t scope); char* cppyy_datamember_name(cppyy_scope_t scope, int datamember_index); char* cppyy_datamember_type(cppyy_scope_t scope, int datamember_index); - size_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); + ptrdiff_t cppyy_datamember_offset(cppyy_scope_t scope, int datamember_index); int cppyy_datamember_index(cppyy_scope_t scope, const char* name); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -593,7 +593,7 @@ @unwrap_spec(args_w='args_w') def call(self, w_cppinstance, args_w): w_result = W_CPPOverload.call(self, w_cppinstance, args_w) - newthis = rffi.cast(capi.C_OBJECT, self.space.int_w(w_result)) + newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result)) cppinstance = self.space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=True) if cppinstance is not None: cppinstance._rawobject = newthis diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -520,12 +520,12 @@ /* handling of function 
argument buffer ----------------------------------- */ -void* cppyy_allocate_function_args(size_t nargs) { +void* cppyy_allocate_function_args(int nargs) { assert(sizeof(CPPYY_G__value) == sizeof(G__value)); G__param* libp = (G__param*)malloc( offsetof(G__param, para) + nargs*sizeof(CPPYY_G__value)); libp->paran = (int)nargs; - for (size_t i = 0; i < nargs; ++i) + for (int i = 0; i < nargs; ++i) libp->para[i].type = 'l'; return (void*)libp->para; } @@ -613,7 +613,7 @@ return derived_type->GetBaseClass(base_type) != 0; } -size_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, +ptrdiff_t cppyy_base_offset(cppyy_type_t derived_handle, cppyy_type_t base_handle, cppyy_object_t address, int /* direction */) { R__LOCKGUARD2(gCINTMutex); @@ -642,7 +642,7 @@ } } - return (size_t) offset; // may be negative (will roll over) + return (ptrdiff_t) offset; // may be negative (will roll over) } @@ -941,16 +941,16 @@ return cppstring_to_cstring(gbl.GetFullTypeName()); } -size_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { +ptrdiff_t cppyy_datamember_offset(cppyy_scope_t handle, int datamember_index) { R__LOCKGUARD2(gCINTMutex); TClassRef& cr = type_from_handle(handle); if (cr.GetClass()) { TDataMember* m = (TDataMember*)cr->GetListOfDataMembers()->At(datamember_index); - return (size_t)m->GetOffsetCint(); From noreply at buildbot.pypy.org Mon May 5 21:08:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 5 May 2014 21:08:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix translation Message-ID: <20140505190811.CB5C21C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71281:a36e3abccfce Date: 2014-05-05 12:06 -0700 http://bitbucket.org/pypy/pypy/changeset/a36e3abccfce/ Log: fix translation diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -199,7 +199,7 @@ if 
isinstance(w_type, W_TypeObject): w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): - class_name = w_realclass.name + class_name = w_realclass.name.decode('utf-8') else: name = '?' if class_name is None: From noreply at buildbot.pypy.org Mon May 5 21:08:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 5 May 2014 21:08:14 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140505190814.15CF71C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71282:1abe6970b0e5 Date: 2014-05-05 12:06 -0700 http://bitbucket.org/pypy/pypy/changeset/1abe6970b0e5/ Log: merge default diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -33,7 +33,7 @@ .. _`Py3k`: http://pypy.org/py3donate.html .. _`STM`: http://pypy.org/tmdonate2.html -.. _ `Numpy`: http://pypy.org/numpydonate.html +.. _ `NumPy`: http://pypy.org/numpydonate.html .. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html .. _`CFFI`: http://cffi.readthedocs.org .. _`cryptography`: https://cryptography.io @@ -66,8 +66,9 @@ Bugfixes -------- -Many issues were cleaned up after being reported by users to https://bugs.pypy.org (ignore the bad SSL certificate) or on IRC at #pypy. Note that we consider -performance slowdowns as bugs. +Many issues were cleaned up after being reported by users to https://bugs.pypy.org or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. Here is a summary of the user-facing changes; +for more information see `whats-new`_: * The ARM port no longer crashes on unaligned memory access to floats and doubles, and singlefloats are supported in the JIT. 
@@ -83,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a rpython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -92,6 +93,8 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com +.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html + New Platforms and Features -------------------------- @@ -99,18 +102,18 @@ * Support for OpenBSD * Code cleanup: we continue to prune out old and unused code, and to refactor - large parts of the codebase. We have separated rpython from the PyPy python - interpreter, and rpython is seeing use in other dynamic language projects. + large parts of the codebase. We have separated RPython from the PyPy python + interpreter, and RPython is seeing use in other dynamic language projects. * Support for precompiled headers in the build process for MSVC * Tweak support of errno in cpyext (the PyPy implemenation of the capi) -Numpy +NumPy ----- -Numpy support has been split into a builtin ``_numpy`` module and a -fork of the numpy code base adapted to pypy at +NumPy support has been split into a builtin ``_numpy`` module and a +fork of the NumPy code base adapted to PyPy at ``https://bitbucket.org/pypy/numpy``. You need to install NumPy separately with a virtualenv: ``pip install git+https://bitbucket.org/pypy/numpy.git``; @@ -120,20 +123,20 @@ * NumPy support has been improved, many failures in indexing, dtypes, and scalars were corrected. We are slowly approaching our goal of passing - the numpy test suite. We still do not support object or unicode ndarrays. + the NumPy test suite. We still do not support object or unicode ndarrays. -* speed of iteration in dot() is now within 1.5x of the numpy c +* speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). 
Since the same array iterator is used throughout the ``_numpy`` module, speed increases should - be apparent in all Numpy functionality. + be apparent in all NumPy functionality. * Most of the core functionality of nditer has been implemented. -* A cffi-based ``numpy.random`` module is available as a branch in the numpy - repository, it will be merged soon after this release. +* A cffi-based ``numpy.random`` module is available as a branch; + it will be merged soon after this release. * enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load - memory operations used in numpy arrays. Further work remains here in virtualizing the + memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -1,5 +1,5 @@ ======================= -What's new in PyPy 2.2+ +What's new since PyPy 2.2.1? ======================= .. 
this is a revision shortly after release-2.2.x diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -656,6 +656,15 @@ data[index] = char array._charbuf_stop() + def getslice(self, start, stop, step, size): + if step == 1: + data = self.array._charbuf_start() + try: + return rffi.charpsize2str(rffi.ptradd(data, start), size) + finally: + self.array._charbuf_stop() + return Buffer.getslice(self, start, stop, step, size) + def get_raw_address(self): return self.array._charbuf_start() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -903,7 +903,9 @@ ("SIZEOF_TIME_T", rffi.TIME_T), ("SIZEOF_LONG", rffi.LONG), ("SIZEOF_SHORT", rffi.SHORT), - ("SIZEOF_INT", rffi.INT) + ("SIZEOF_INT", rffi.INT), + ("SIZEOF_FLOAT", rffi.FLOAT), + ("SIZEOF_DOUBLE", rffi.DOUBLE), ]: pypy_macros.append("#define %s %s" % (macro_name, rffi.sizeof(size))) pypy_macros.append('') diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -40,10 +40,10 @@ self.check_valid() return self.space.wrapbytes(self.mmap.read(num)) - @unwrap_spec(tofind='bufferstr') - def find(self, tofind, w_start=None, w_end=None): + def find(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -54,10 +54,10 @@ end = space.getindex_w(w_end, None) return space.wrap(self.mmap.find(tofind, start, end)) - @unwrap_spec(tofind='bufferstr') - def rfind(self, tofind, w_start=None, w_end=None): + def rfind(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -87,9 +87,9 @@ except OSError, e: raise 
mmap_error(self.space, e) - @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, w_data): self.check_valid() + data = self.space.getarg_w('s#', w_data) self.check_writeable() try: self.mmap.write(data) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -132,12 +132,13 @@ else: return space.wrapbytes(s) - at unwrap_spec(fd=c_int, data='bufferstr') -def write(space, fd, data): + at unwrap_spec(fd=c_int) +def write(space, fd, w_data): """Write a string to a file descriptor. Return the number of bytes actually written, which may be smaller than len(data).""" + data = space.getarg_w('s*', w_data) try: - res = os.write(fd, data) + res = os.write(fd, data.as_str()) except OSError, e: raise wrap_oserror(space, e) else: diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -25,3 +25,38 @@ guard_true(i69, descr=...) --TICK-- """) + + def test_struct_unpack(self): + def main(n): + import struct + import array + a = array.array('c', struct.pack('i', 42)) + i = 0 + while i < n: + i += 1 + struct.unpack('i', a) # ID: unpack + return i + log = self.run(main, [1000]) + assert log.result == 1000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('unpack', """ + guard_not_invalidated(descr=...) + p90 = newstr(4) + call(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) + guard_no_exception(descr=...) + i91 = strgetitem(p90, 0) + i92 = strgetitem(p90, 1) + i93 = int_lshift(i92, 8) + i94 = int_or(i91, i93) + i95 = strgetitem(p90, 2) + i96 = int_lshift(i95, 16) + i97 = int_or(i94, i96) + i98 = strgetitem(p90, 3) + i99 = int_ge(i98, 128) + guard_false(i99, descr=...) 
+ i100 = int_lshift(i98, 24) + i101 = int_or(i97, i100) + i102 = getfield_raw(50657056, descr=) + i103 = int_lt(i102, 0) + guard_false(i103, descr=...) + """) diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -8,7 +8,6 @@ class PackFormatIterator(FormatIterator): - def __init__(self, space, args_w, size): self.space = space self.args_w = args_w @@ -95,11 +94,11 @@ class UnpackFormatIterator(FormatIterator): - - def __init__(self, space, input): + def __init__(self, space, buf): self.space = space - self.input = input - self.inputpos = 0 + self.buf = buf + self.length = buf.getlength() + self.pos = 0 self.result_w = [] # list of wrapped objects # See above comment on operate. @@ -114,18 +113,18 @@ _operate_is_specialized_ = True def align(self, mask): - self.inputpos = (self.inputpos + mask) & ~mask + self.pos = (self.pos + mask) & ~mask def finished(self): - if self.inputpos != len(self.input): + if self.pos != self.length: raise StructError("unpack str size too long for format") def read(self, count): - end = self.inputpos + count - if end > len(self.input): + end = self.pos + count + if end > self.length: raise StructError("unpack str size too short for format") - s = self.input[self.inputpos : end] - self.inputpos = end + s = self.buf.getslice(self.pos, end, 1, count) + self.pos = end return s @specialize.argtype(1) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,4 +1,5 @@ from rpython.rlib import jit +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator from rpython.tool.sourcetools import func_with_new_name @@ -65,9 +66,8 @@ buf.setslice(offset, res) - at unwrap_spec(format=str, 
input='bufferstr') -def unpack(space, format, input): - fmtiter = UnpackFormatIterator(space, input) +def _unpack(space, format, buf): + fmtiter = UnpackFormatIterator(space, buf) try: fmtiter.interpret(format) except StructOverflowError, e: @@ -83,11 +83,16 @@ # No cache in this implementation -# XXX inefficient + at unwrap_spec(format=str) +def unpack(space, format, w_str): + buf = space.getarg_w('s*', w_str) + return _unpack(space, format, buf) + + @unwrap_spec(format=str, offset=int) -def unpack_from(space, format, w_buf, offset=0): +def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) - buf = space.getarg_w('z*', w_buf) + buf = space.getarg_w('z*', w_buffer) if buf is None: w_module = space.getbuiltinmodule('struct') w_error = space.getattr(w_module, space.wrap('error')) @@ -100,8 +105,8 @@ raise oefmt(w_error, "unpack_from requires a buffer of at least %d bytes", size) - data = buf.getslice(offset, offset + size, 1, size) - return unpack(space, format, data) + buf = SubBuffer(buf, offset, size) + return _unpack(space, format, buf) class W_Struct(W_Root): diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -353,7 +353,7 @@ result = description.FunctionDesc(self, pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: - raise Exception, "ClassDesc for object not supported" + raise Exception("ClassDesc for object not supported") if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types result = self.getfrozen(pyobj) else: @@ -591,7 +591,7 @@ for name, value in dict.iteritems(): if value is func: return cls, name - raise Exception, "could not match bound-method to attribute name: %r" % (boundmeth,) + raise Exception("could not match bound-method to attribute name: %r" % (boundmeth,)) def ishashable(x): try: diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- 
a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -65,14 +65,14 @@ s_start, s_stop = args[:2] s_step = args[2] else: - raise Exception, "range() takes 1 to 3 arguments" + raise Exception("range() takes 1 to 3 arguments") empty = False # so far if not s_step.is_constant(): step = 0 # this case signals a variable step else: step = s_step.const if step == 0: - raise Exception, "range() with step zero" + raise Exception("range() with step zero") if s_start.is_constant() and s_stop.is_constant(): try: if len(xrange(s_start.const, s_stop.const, step)) == 0: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -394,7 +394,7 @@ return SomePBC([subdef.classdesc for subdef in self.getallsubdefs()]) def _freeze_(self): - raise Exception, "ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly" + raise Exception("ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly") # ____________________________________________________________ diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -30,7 +30,7 @@ except (KeyboardInterrupt, SystemExit): raise except: - raise Exception, "broken specialize directive parms: %s" % directive + raise Exception("broken specialize directive parms: %s" % directive) name = name.replace(':', '__') try: specializer = getattr(pol, name) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1435,7 +1435,7 @@ elif a==2: raise X(1) elif a==3: - raise X,4 + raise X(4) else: try: l[0] @@ -3628,7 +3628,7 @@ def f(): e = OverflowError() lle = cast_instance_to_base_ptr(e) - raise Exception, lle + raise Exception(lle) # ^^^ 
instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() py.test.raises(AssertionError, a.build_types, f, []) diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -7,7 +7,6 @@ import __builtin__ from rpython.tool.error import source_lines -from rpython.tool.stdlib_opcode import host_bytecode_spec from rpython.rlib import rstackovf from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, @@ -305,8 +304,6 @@ ] class FlowContext(object): - opcode_method_names = host_bytecode_spec.method_names - def __init__(self, graph, code): self.graph = graph func = graph.func diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -1,7 +1,7 @@ """ Buffer protocol support. """ -from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib import jit class Buffer(object): @@ -61,7 +61,7 @@ if step == 1: assert 0 <= start <= stop return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) + return Buffer.getslice(self, start, stop, step, size) class SubBuffer(Buffer): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -593,7 +593,7 @@ def can_enter_jit(_self, **livevars): if _self.autoreds: - raise TypeError, "Cannot call can_enter_jit on a driver with reds='auto'" + raise TypeError("Cannot call can_enter_jit on a driver with reds='auto'") # special-cased by ExtRegistryEntry if _self.check_untranslated: _self._check_arguments(livevars, False) diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -109,11 +109,11 @@ def _check_type(TYPE): if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind != 'raw': - raise TypeError, "Can only push raw values to C, not 
'gc'" + raise TypeError("Can only push raw values to C, not 'gc'") # XXX probably we should recursively check for struct fields here, # lets just ignore that for now if isinstance(TYPE.TO, lltype.Array) and 'nolength' not in TYPE.TO._hints: - raise TypeError, "Can only push to C arrays without length info" + raise TypeError("Can only push to C arrays without length info") class ArgChain(object): @@ -136,7 +136,7 @@ elif TYPE is rffi.FLOAT: cls = SingleFloatArg else: - raise TypeError, 'Unsupported argument type: %s' % TYPE + raise TypeError('Unsupported argument type: %s' % TYPE) self._append(cls(val)) return self @@ -247,8 +247,8 @@ # assuming that argchain is completely virtual. self = jit.promote(self) if argchain.numargs != len(self.argtypes): - raise TypeError, 'Wrong number of arguments: %d expected, got %d' %\ - (len(self.argtypes), argchain.numargs) + raise TypeError('Wrong number of arguments: %d expected, got %d' % + (len(self.argtypes), argchain.numargs)) ll_args = self._prepare() i = 0 arg = argchain.first @@ -273,7 +273,7 @@ elif RESULT is lltype.Void: return self._do_call_void(self.funcsym, ll_args) else: - raise TypeError, 'Unsupported result type: %s' % RESULT + raise TypeError('Unsupported result type: %s' % RESULT) # return rffi.cast(RESULT, res) @@ -430,7 +430,7 @@ def getpointer_by_ordinal(self, name, argtypes, restype, flags=FUNCFLAG_CDECL): - return Func('by_ordinal', argtypes, restype, + return Func('by_ordinal', argtypes, restype, dlsym_byordinal(self.lib, name), flags=flags, keepalive=self) def getaddressindll(self, name): diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -122,7 +122,7 @@ """ typecheck = kwds.pop('typecheck', True) if types_ and kwds: - raise TypeError, 'Cannot mix positional arguments and keywords' + raise TypeError('Cannot mix positional arguments and keywords') if not typecheck: def decorator(f): @@ -177,7 +177,7 @@ if not 
s_expected.contains(s_argtype): msg = "%s argument %r must be of type %s" % ( f.func_name, srcargs[i], expected_type) - raise TypeError, msg + raise TypeError(msg) # template = """ def {name}({arglist}): @@ -576,7 +576,7 @@ # ____________________________________________________________ def hlinvoke(repr, llcallable, *args): - raise TypeError, "hlinvoke is meant to be rtyped and not called direclty" + raise TypeError("hlinvoke is meant to be rtyped and not called direclty") def invoke_around_extcall(before, after): """Call before() before any external function call, and after() after. diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -173,7 +173,7 @@ if type(r) is long and not is_valid_int(r): # checks only if applicable to r's type. # this happens in the garbage collector. - raise OverflowError, "signed integer expression did overflow" + raise OverflowError("signed integer expression did overflow") return r # Strange things happening for float to int on 64 bit: @@ -213,7 +213,7 @@ return other_type if self_type.SIGNED == other_type.SIGNED: return build_int(None, self_type.SIGNED, max(self_type.BITS, other_type.BITS)) - raise AssertionError, "Merging these types (%s, %s) is not supported" % (self_type, other_type) + raise AssertionError("Merging these types (%s, %s) is not supported" % (self_type, other_type)) def signedtype(t): if t in (bool, int, long): diff --git a/rpython/rlib/rsre/rpy/sre_compile.py b/rpython/rlib/rsre/rpy/sre_compile.py --- a/rpython/rlib/rsre/rpy/sre_compile.py +++ b/rpython/rlib/rsre/rpy/sre_compile.py @@ -63,7 +63,7 @@ emit(OPCODES[ANY]) elif op in REPEATING_CODES: if flags & SRE_FLAG_TEMPLATE: - raise error, "internal: unsupported template operator" + raise error("internal: unsupported template operator") emit(OPCODES[REPEAT]) skip = _len(code); emit(0) emit(av[0]) @@ -112,7 +112,7 @@ else: lo, hi = av[1].getwidth() if lo != hi: - raise error, 
"look-behind requires fixed-width pattern" + raise error("look-behind requires fixed-width pattern") emit(lo) # look behind _compile(code, av[1], flags) emit(OPCODES[SUCCESS]) @@ -173,7 +173,7 @@ else: code[skipyes] = _len(code) - skipyes + 1 else: - raise ValueError, ("unsupported operand type", op) + raise ValueError("unsupported operand type", op) def _compile_charset(charset, flags, code, fixup=None): # compile charset subprogram @@ -201,7 +201,7 @@ else: emit(CHCODES[av]) else: - raise error, "internal: unsupported set operator" + raise error("internal: unsupported set operator") emit(OPCODES[FAILURE]) def _optimize_charset(charset, fixup): diff --git a/rpython/rlib/rsre/rpy/sre_parse.py b/rpython/rlib/rsre/rpy/sre_parse.py --- a/rpython/rlib/rsre/rpy/sre_parse.py +++ b/rpython/rlib/rsre/rpy/sre_parse.py @@ -75,7 +75,7 @@ if name is not None: ogid = self.groupdict.get(name, None) if ogid is not None: - raise error, ("redefinition of group name %s as group %d; " + raise error("redefinition of group name %s as group %d; " "was group %d" % (repr(name), gid, ogid)) self.groupdict[name] = gid self.open.append(gid) @@ -188,7 +188,7 @@ try: c = self.string[self.index + 1] except IndexError: - raise error, "bogus escape (end of line)" + raise error("bogus escape (end of line)") char = char + c self.index = self.index + len(char) self.next = char @@ -238,7 +238,7 @@ escape = escape + source.get() escape = escape[2:] if len(escape) != 2: - raise error, "bogus escape: %s" % repr("\\" + escape) + raise error("bogus escape: %s" % repr("\\" + escape)) return LITERAL, int(escape, 16) & 0xff elif c in OCTDIGITS: # octal escape (up to three digits) @@ -247,12 +247,12 @@ escape = escape[1:] return LITERAL, int(escape, 8) & 0xff elif c in DIGITS: - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise 
error("bogus escape: %s" % repr(escape)) def _escape(source, escape, state): # handle escape code in expression @@ -289,14 +289,14 @@ group = int(escape[1:]) if group < state.groups: if not state.checkgroup(group): - raise error, "cannot refer to open group" + raise error("cannot refer to open group") return GROUPREF, group raise ValueError if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) def _parse_sub(source, state, nested=1): # parse an alternation: a|b|c @@ -313,7 +313,7 @@ if not source.next or sourcematch(")", 0): break else: - raise error, "pattern not properly closed" + raise error("pattern not properly closed") if len(items) == 1: return items[0] @@ -362,11 +362,11 @@ if source.match("|"): item_no = _parse(source, state) if source.match("|"): - raise error, "conditional backref with more than two branches" + raise error("conditional backref with more than two branches") else: item_no = None if source.next and not source.match(")", 0): - raise error, "pattern not properly closed" + raise error("pattern not properly closed") subpattern = SubPattern(state) subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) return subpattern @@ -431,7 +431,7 @@ elif this: code1 = LITERAL, ord(this) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") if sourcematch("-"): # potential range this = sourceget() @@ -447,14 +447,14 @@ else: code2 = LITERAL, ord(this) if code1[0] != LITERAL or code2[0] != LITERAL: - raise error, "bad character range" + raise error("bad character range") lo = code1[1] hi = code2[1] if hi < lo: - raise error, "bad character range" + raise error("bad character range") setappend((RANGE, (lo, hi))) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") else: if code1[0] is IN: code1 = code1[1][0] 
@@ -507,16 +507,16 @@ if max < min: raise error("bad repeat interval") else: - raise error, "not supported" + raise error("not supported") # figure out which item to repeat if subpattern: item = subpattern[-1:] else: item = None if not item or (_len(item) == 1 and item[0][0] == AT): - raise error, "nothing to repeat" + raise error("nothing to repeat") if item[0][0] in REPEATCODES: - raise error, "multiple repeat" + raise error("multiple repeat") if sourcematch("?"): subpattern[-1] = (MIN_REPEAT, (min, max, item)) else: @@ -540,7 +540,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ">": break name = name + char @@ -556,7 +556,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break name = name + char @@ -567,14 +567,14 @@ "%r" % name) gid = state.groupdict.get(name) if gid is None: - raise error, "unknown group name" + raise error("unknown group name") subpatternappend((GROUPREF, gid)) continue else: char = sourceget() if char is None: - raise error, "unexpected end of pattern" - raise error, "unknown specifier: ?P%s" % char + raise error("unexpected end of pattern") + raise error("unknown specifier: ?P%s" % char) elif sourcematch(":"): # non-capturing group group = 2 @@ -585,7 +585,7 @@ break sourceget() if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") continue elif source.next in ASSERTCHARS: # lookahead assertions @@ -593,12 +593,12 @@ dir = 1 if char == "<": if source.next not in LOOKBEHINDASSERTCHARS: - raise error, "syntax error" + raise error("syntax error") dir = -1 # lookbehind char = sourceget() p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if char == "=": subpatternappend((ASSERT, (dir, p))) else: @@ -610,7 +610,7 @@ while 1: char = sourceget() if 
char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break condname = condname + char @@ -620,16 +620,16 @@ if isname(condname): condgroup = state.groupdict.get(condname) if condgroup is None: - raise error, "unknown group name" + raise error("unknown group name") else: try: condgroup = int(condname) except ValueError: - raise error, "bad character in group name" + raise error("bad character in group name") else: # flags if not source.next in FLAGS: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") while source.next in FLAGS: state.flags = state.flags | FLAGS[sourceget()] if group: @@ -644,7 +644,7 @@ else: p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if group is not None: state.closegroup(group) subpatternappend((SUBPATTERN, (group, p))) @@ -652,10 +652,10 @@ while 1: char = sourceget() if char is None: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") if char == ")": break - raise error, "unknown extension" + raise error("unknown extension") elif this == "^": subpatternappend((AT, AT_BEGINNING)) @@ -668,7 +668,7 @@ subpatternappend(code) else: - raise error, "parser error" + raise error("parser error") return subpattern @@ -686,9 +686,9 @@ tail = source.get() if tail == ")": - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") elif tail: - raise error, "bogus characters at end of regular expression" + raise error("bogus characters at end of regular expression") if flags & SRE_FLAG_DEBUG: p.dump() @@ -730,23 +730,23 @@ while 1: char = sget() if char is None: - raise error, "unterminated group name" + raise error("unterminated group name") if char == ">": break name = name + char if not name: - raise error, "missing group name" + raise error("missing group name") try: index = int(name) if index < 0: - raise error, "negative group 
number" + raise error("negative group number") except ValueError: if not isname(name): - raise error, "bad character in group name" + raise error("bad character in group name") try: index = pattern.groupindex[name] except KeyError: - raise IndexError, "unknown group name" + raise IndexError("unknown group name") a((MARK, index)) elif c == "0": if s.next in OCTDIGITS: @@ -796,7 +796,7 @@ for index, group in groups: literals[index] = s = g(group) if s is None: - raise error, "unmatched group" + raise error("unmatched group") except IndexError: - raise error, "invalid group reference" + raise error("invalid group reference") return sep.join(literals) diff --git a/rpython/rlib/rstruct/formatiterator.py b/rpython/rlib/rstruct/formatiterator.py --- a/rpython/rlib/rstruct/formatiterator.py +++ b/rpython/rlib/rstruct/formatiterator.py @@ -82,6 +82,7 @@ def finished(self): pass + class CalcSizeFormatIterator(FormatIterator): totalsize = 0 diff --git a/rpython/rlib/rzipfile.py b/rpython/rlib/rzipfile.py --- a/rpython/rlib/rzipfile.py +++ b/rpython/rlib/rzipfile.py @@ -214,7 +214,7 @@ def _GetContents(self, fp): endrec = _EndRecData(fp) if not endrec: - raise BadZipfile, "File is not a zip file" + raise BadZipfile("File is not a zip file") size_cd = endrec.stuff[5] # bytes in central directory offset_cd = endrec.stuff[6] # offset of central directory self.comment = endrec.comment @@ -227,7 +227,7 @@ centdir = fp.read(46) total = total + 46 if centdir[0:4] != stringCentralDir: - raise BadZipfile, "Bad magic number for central directory" + raise BadZipfile("Bad magic number for central directory") centdir = runpack(structCentralDir, centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information @@ -255,7 +255,7 @@ fp.seek(data.header_offset, 0) fheader = fp.read(30) if fheader[0:4] != stringFileHeader: - raise BadZipfile, "Bad magic number for file header" + raise BadZipfile("Bad magic number for file header") fheader = 
runpack(structFileHeader, fheader) # file_offset is computed here, since the extra field for # the central directory and for the local file header @@ -266,9 +266,8 @@ + fheader[_FH_EXTRA_FIELD_LENGTH]) fname = fp.read(fheader[_FH_FILENAME_LENGTH]) if fname != data.orig_filename: - raise BadZipfile, \ - 'File name in directory "%s" and header "%s" differ.' % ( - data.orig_filename, fname) + raise BadZipfile('File name in directory "%s" and ' + 'header "%s" differ.' % (data.orig_filename, fname)) fp.seek(self.start_dir, 0) def getinfo(self, filename): @@ -296,15 +295,13 @@ finally: rzlib.inflateEnd(stream) elif zinfo.compress_type == ZIP_DEFLATED: - raise BadZipfile, \ - "Cannot decompress file, zlib not installed" + raise BadZipfile("Cannot decompress file, zlib not installed") else: - raise BadZipfile, \ - "Unsupported compression method %d for file %s" % \ - (zinfo.compress_type, filename) + raise BadZipfile("Unsupported compression method %d for " + "file %s" % (zinfo.compress_type, filename)) crc = crc32(bytes) if crc != zinfo.CRC: - raise BadZipfile, "Bad CRC-32 for file %s" % filename + raise BadZipfile("Bad CRC-32 for file %s" % filename) return bytes finally: fp.close() diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -6,4 +6,5 @@ assert buf.getitem(4) == 'o' assert buf.getlength() == 11 assert buf.getslice(1, 6, 1, 5) == 'ello ' + assert buf.getslice(1, 6, 2, 3) == 'el ' assert buf.as_str() == 'hello world' diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -95,7 +95,7 @@ elif whence == 2: offset += len(self.buf) else: - raise ValueError, "whence should be 0, 1 or 2" + raise ValueError("whence should be 0, 1 or 2") if offset < 0: offset = 0 self.pos = offset diff --git a/rpython/rlib/unicodedata/unicodedb_5_2_0.py 
b/rpython/rlib/unicodedata/unicodedb_5_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_5_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_5_2_0.py @@ -39,7 +39,7 @@ charnode = left else: charnode = right - raise KeyError, name + raise KeyError(name) def name_of_node(charnode): res = [] @@ -112664,7 +112664,7 @@ if code == 917505: res = 9201 if 917536 <= code <= 917631: res = _charnames_917536[code-917536] if 917760 <= code <= 917999: res = _charnames_917760[code-917760] - if res == -1: raise KeyError, code + if res == -1: raise KeyError(code) return name_of_node(res) # the following dictionary is used by modules that take this as a base diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -58,7 +58,7 @@ try: holders = arguments.match_signature(signature, defs_h) except ArgErr, e: - raise TyperError, "signature mismatch: %s" % e.getmsg(graph.name) + raise TyperError("signature mismatch: %s" % e.getmsg(graph.name)) assert len(holders) == len(rinputs), "argument parsing mismatch" vlist = [] diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -257,7 +257,7 @@ @classmethod def _malloc(cls, n=None): if not isinstance(n, int): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") biggercls = get_ctypes_array_of_size(A, n) bigarray = allocate_ctypes(biggercls) if hasattr(bigarray, 'length'): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,7 +191,7 @@ _adtmeths = {} def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) def _install_extras(self, adtmeths={}, 
hints={}): self._adtmeths = frozendict(adtmeths) @@ -253,7 +253,7 @@ self._arrayfld = None for name, typ in fields: if name.startswith('_'): - raise NameError, ("%s: field name %r should not start with " + raise NameError("%s: field name %r should not start with " "an underscore" % (self._name, name,)) names.append(name) if name in flds: @@ -311,8 +311,8 @@ def _nofield(self, name): - raise AttributeError, 'struct %s has no field %r' % (self._name, - name) + raise AttributeError('struct %s has no field %r' % (self._name, + name)) def _names_without_voids(self): names_without_voids = [name for name in self._names if self._flds[name] is not Void] @@ -545,7 +545,7 @@ self.ARGS = tuple(args) assert isinstance(result, LowLevelType) if isinstance(result, ContainerType): - raise TypeError, "function result can only be primitive or pointer" + raise TypeError("function result can only be primitive or pointer") self.RESULT = result self.ABI = abi @@ -602,7 +602,7 @@ return "%s (gcopaque)" % self.tag def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) class ForwardReference(ContainerType): @@ -714,7 +714,7 @@ _cache = WeakValueDictionary() # cache the Ptrs def __new__(cls, TO, use_cache=True): if not isinstance(TO, ContainerType): - raise TypeError, ("can only point to a Container type, " + raise TypeError("can only point to a Container type, " "not to %s" % (TO,)) if not use_cache: obj = LowLevelType.__new__(cls) @@ -835,7 +835,7 @@ def cast_primitive(TGT, value): ORIG = typeOf(value) if not isinstance(TGT, Primitive) or not isinstance(ORIG, Primitive): - raise TypeError, "can only primitive to primitive" + raise TypeError("can only primitive to primitive") if ORIG == TGT: return value if ORIG == Char or ORIG == UniChar: @@ -855,7 +855,7 @@ return float(value) if ORIG == LongFloat and TGT == Float: return float(value) - raise TypeError, "unsupported cast" + raise 
TypeError("unsupported cast") def _cast_whatever(TGT, value): from rpython.rtyper.lltypesystem import llmemory, rffi @@ -932,13 +932,13 @@ def cast_pointer(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise TypeError("can only cast pointers to other pointers") return ptr._cast_to(PTRTYPE) def cast_opaque_ptr(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise TypeError("can only cast pointers to other pointers") if CURTYPE == PTRTYPE: return ptr if CURTYPE.TO._gckind != PTRTYPE.TO._gckind: @@ -989,9 +989,9 @@ """ CURTYPE = typeOf(structptr).TO if not isinstance(CURTYPE, Struct): - raise TypeError, "direct_fieldptr: not a struct" + raise TypeError("direct_fieldptr: not a struct") if fieldname not in CURTYPE._flds: - raise TypeError, "%s has no field %r" % (CURTYPE, fieldname) + raise TypeError("%s has no field %r" % (CURTYPE, fieldname)) if not structptr: raise RuntimeError("direct_fieldptr: NULL argument") return _subarray._makeptr(structptr._obj, fieldname, structptr._solid) @@ -1004,7 +1004,7 @@ """ CURTYPE = typeOf(arrayptr).TO if not isinstance(CURTYPE, (Array, FixedSizeArray)): - raise TypeError, "direct_arrayitems: not an array" + raise TypeError("direct_arrayitems: not an array") if not arrayptr: raise RuntimeError("direct_arrayitems: NULL argument") return _subarray._makeptr(arrayptr._obj, 0, arrayptr._solid) @@ -1247,7 +1247,7 @@ from rpython.rtyper.lltypesystem import rffi if isinstance(self._T, FuncType): if len(args) != len(self._T.ARGS): - raise TypeError,"calling %r with wrong argument number: %r" % (self._T, args) + raise TypeError("calling %r with wrong argument number: %r" % (self._T, args)) for i, a, ARG in zip(range(len(self._T.ARGS)), args, self._T.ARGS): if typeOf(a) != ARG: # ARG could be Void @@ -1272,11 
+1272,11 @@ pass else: args_repr = [typeOf(arg) for arg in args] - raise TypeError, ("calling %r with wrong argument " + raise TypeError("calling %r with wrong argument " "types: %r" % (self._T, args_repr)) callb = self._obj._callable if callb is None: - raise RuntimeError,"calling undefined function" + raise RuntimeError("calling undefined function") return callb(*args) raise TypeError("%r instance is not a function" % (self._T,)) @@ -1421,7 +1421,7 @@ self._set_offsets(_offsets) def __nonzero__(self): - raise RuntimeError, "do not test an interior pointer for nullity" + raise RuntimeError("do not test an interior pointer for nullity") def _get_obj(self): ob = self._parent @@ -1657,9 +1657,9 @@ def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None): if not is_valid_int(n): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") if n < 0: - raise ValueError, "negative array length" + raise ValueError("negative array length") _parentable.__init__(self, TYPE) myrange = self._check_range(n) self.items = [TYPE.OF._allocate(initialization=initialization, @@ -1977,9 +1977,9 @@ assert n is None o = _opaque(T, initialization=initialization) else: - raise TypeError, "malloc: unmallocable type" + raise TypeError("malloc: unmallocable type") if flavor == 'gc' and T._gckind != 'gc' and not immortal: - raise TypeError, "gc flavor malloc of a non-GC non-immortal structure" + raise TypeError("gc flavor malloc of a non-GC non-immortal structure") if flavor == "raw" and not immortal and track_allocation: leakfinder.remember_malloc(o, framedepth=2) solid = immortal or flavor == 'raw' @@ -1987,10 +1987,10 @@ def free(p, flavor, track_allocation=True): if flavor.startswith('gc'): - raise TypeError, "gc flavor free" + raise TypeError("gc flavor free") T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + raise TypeError("free(): only for 
pointers to non-gc containers") if track_allocation: leakfinder.remember_free(p._obj0) p._obj0._free() @@ -1998,7 +1998,7 @@ def render_immortal(p, track_allocation=True): T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + raise TypeError("free(): only for pointers to non-gc containers") if track_allocation: leakfinder.remember_free(p._obj0) @@ -2033,7 +2033,7 @@ def functionptr(TYPE, name, **attrs): if not isinstance(TYPE, FuncType): - raise TypeError, "functionptr() for FuncTypes only" + raise TypeError("functionptr() for FuncTypes only") try: hash(tuple(attrs.items())) except TypeError: @@ -2046,7 +2046,7 @@ def opaqueptr(TYPE, name, **attrs): if not isinstance(TYPE, OpaqueType): - raise TypeError, "opaqueptr() for OpaqueTypes only" + raise TypeError("opaqueptr() for OpaqueTypes only") o = _opaque(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o, solid=True) @@ -2064,23 +2064,23 @@ def attachRuntimeTypeInfo(GCSTRUCT, funcptr=None, destrptr=None, customtraceptr=None): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) GCSTRUCT._attach_runtime_type_info_funcptr(funcptr, destrptr, customtraceptr) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def getRuntimeTypeInfo(GCSTRUCT): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) if GCSTRUCT._runtime_type_info is None: - raise ValueError, ("no attached runtime type info for GcStruct %s" % + raise ValueError("no attached runtime type info for GcStruct %s" % GCSTRUCT._name) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def runtime_type_info(p): T = typeOf(p) if not isinstance(T, Ptr) or not isinstance(T.TO, RttiStruct): - raise TypeError, "runtime_type_info on non-RttiStruct pointer: %s" % p + 
raise TypeError("runtime_type_info on non-RttiStruct pointer: %s" % p) struct = p._obj top_parent = top_container(struct) result = getRuntimeTypeInfo(top_parent._TYPE) @@ -2090,7 +2090,7 @@ T = typeOf(query_funcptr).TO.ARGS[0] result2 = query_funcptr(cast_pointer(T, p)) if result != result2: - raise RuntimeError, ("runtime type-info function for %s:\n" + raise RuntimeError("runtime type-info function for %s:\n" " returned: %s,\n" "should have been: %s" % (p, result2, result)) return result diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -379,7 +379,7 @@ def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr if not (hop.args_r[1] == rstr.char_repr and hop.args_r[2] == rstr.char_repr): - raise TyperError, 'replace only works for char args' + raise TyperError('replace only works for char args') v_str, v_c1, v_c2 = hop.inputargs(rstr.repr, rstr.char_repr, rstr.char_repr) hop.exception_cannot_occur() return hop.gendirectcall(self.ll.ll_replace_chr_chr, v_str, v_c1, v_c2) diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -22,12 +22,12 @@ try: mod = __import__(module, {}, {}, ['__doc__']) except ImportError, e: - raise Exception, "Import error loading %s: %s" % (dottedname, e) + raise Exception("Import error loading %s: %s" % (dottedname, e)) try: func = getattr(mod, name) except AttributeError: - raise Exception, "Function %s not found in module" % dottedname + raise Exception("Function %s not found in module" % dottedname) return func diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -178,7 +178,7 @@ else: return self.db.get(value) else: - raise TypeError, "expr(%r)" % (v,) + raise TypeError("expr(%r)" % (v,)) # 
____________________________________________________________ diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -934,7 +934,7 @@ elif hasattr(fnobj._callable, "c_name"): return [] else: - raise ValueError, "don't know how to generate code for %r" % (fnobj,) + raise ValueError("don't know how to generate code for %r" % (fnobj,)) class ExtType_OpaqueNode(ContainerNode): nodekind = 'rpyopaque' diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -35,9 +36,6 @@ pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif pypy_asm_stack_bottom(); -#ifdef PYPY_X86_CHECK_SSE2_DEFINED - pypy_x86_check_sse2(); -#endif instrument_setup(); #ifndef MS_WINDOWS @@ -83,6 +81,9 @@ int PYPY_MAIN_FUNCTION(int argc, char *argv[]) { +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif return pypy_main_function(argc, argv); } diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -627,7 +627,7 @@ elif output.startswith('T'): return output[1:] else: - raise ValueError, 'probing for env var returned %r' % (output,) + raise ValueError('probing for env var returned %r' % (output,)) def test_dictlike_environ_getitem(): def fn(s): diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -234,9 +234,9 @@ if os.WIFEXITED(status): status = os.WEXITSTATUS(status) if status != 0: - raise Exception, "instrumentation child failed: %d" % status + raise Exception("instrumentation child failed: %d" % status) else: - raise Exception, 
"instrumentation child aborted" + raise Exception("instrumentation child aborted") import array, struct n = datafile.size()//struct.calcsize('L') datafile = datafile.open('rb') diff --git a/rpython/translator/gensupp.py b/rpython/translator/gensupp.py --- a/rpython/translator/gensupp.py +++ b/rpython/translator/gensupp.py @@ -39,7 +39,7 @@ before generating any new names.""" for name in txt.split(): if name in self.seennames: - raise NameError, "%s has already been seen!" + raise NameError("%s has already been seen!") self.seennames[name] = 1 def _ensure_unique(self, basename): diff --git a/rpython/translator/goal/bpnn.py b/rpython/translator/goal/bpnn.py --- a/rpython/translator/goal/bpnn.py +++ b/rpython/translator/goal/bpnn.py @@ -74,7 +74,7 @@ def update(self, inputs): if len(inputs) != self.ni-1: - raise ValueError, 'wrong number of inputs' + raise ValueError('wrong number of inputs') # input activations for i in range(self.ni-1): @@ -100,7 +100,7 @@ def backPropagate(self, targets, N, M): if len(targets) != self.no: - raise ValueError, 'wrong number of target values' + raise ValueError('wrong number of target values') # calculate error terms for output output_deltas = [0.0] * self.no diff --git a/rpython/translator/goal/richards.py b/rpython/translator/goal/richards.py --- a/rpython/translator/goal/richards.py +++ b/rpython/translator/goal/richards.py @@ -144,6 +144,9 @@ class TaskWorkArea(object): def __init__(self): + self.reset() + + def reset(self): self.taskTab = [None] * TASKTABSIZE self.taskList = None @@ -151,7 +154,6 @@ self.holdCount = 0 self.qpktCount = 0 -taskWorkArea = TaskWorkArea() class Task(TaskState): @@ -361,8 +363,7 @@ def run(self, iterations): for i in xrange(iterations): - taskWorkArea.holdCount = 0 - taskWorkArea.qpktCount = 0 + taskWorkArea.reset() IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec()) diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- 
a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -20,8 +20,8 @@ try: subprocess.check_output([cc, '--version']) except: - raise ValueError,"Could not find compiler specified by cc option" + \ - " '%s', it must be a valid exe file on your path"%cc + raise ValueError("Could not find compiler specified by cc option '%s'," + " it must be a valid exe file on your path" % cc) return MingwPlatform(cc) def Windows(cc=None): @@ -31,7 +31,7 @@ raise Exception("Win64 is not supported. You must either build for Win32" " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) - + def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -94,7 +94,7 @@ name = "msvc" so_ext = 'dll' exe_ext = 'exe' - + relevant_environ = ('PATH', 'INCLUDE', 'LIB') cc = 'cl.exe' @@ -105,7 +105,7 @@ standalone_only = () shared_only = () environ = None - + def __init__(self, cc=None, x64=False): self.x64 = x64 msvc_compiler_environ = find_msvc_env(x64) @@ -134,7 +134,7 @@ else: masm32 = 'ml.exe' masm64 = 'ml64.exe' - + if x64: self.masm = masm64 else: @@ -338,10 +338,10 @@ definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) - rules.append(('stdafx.pch', 'stdafx.h', + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '$(CREATE_PCH) $(INCLUDEDIRS)')) - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files @@ -361,7 +361,7 @@ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) @@ -371,7 +371,7 @@ for rule in rules: m.rule(*rule) - + if self.version < 80: 
m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -116,7 +116,7 @@ print >>f, " ",op print >>f, '--end--' return - raise TypeError, "don't know about %r" % x + raise TypeError("don't know about %r" % x) def view(self): From noreply at buildbot.pypy.org Mon May 5 21:08:15 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 5 May 2014 21:08:15 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20140505190815.9FCB51C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71283:01e1b2dbf81e Date: 2014-05-05 12:07 -0700 http://bitbucket.org/pypy/pypy/changeset/01e1b2dbf81e/ Log: merge upstream diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -202,30 +202,6 @@ return w_result.buffer_w(space, flags) raise TypeError - def readbuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.readbuf_w(space) - return self.buffer_w(space, space.BUF_SIMPLE) - - def writebuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.writebuf_w(space) - return self.buffer_w(space, space.BUF_WRITABLE) - - def charbuf_w(self, space): - w_impl = space.lookup(self, '__buffer__') - if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) - if space.isinstance_w(w_result, space.w_memoryview): - return w_result.charbuf_w(space) - return self.buffer_w(space, space.BUF_SIMPLE).as_str() - def 
bytes_w(self, space): self._typed_unwrap_error(space, "bytes") @@ -1369,31 +1345,33 @@ return w_obj.buffer_w(self, flags) except TypeError: raise oefmt(self.w_TypeError, - "'%T' does not have the buffer interface", w_obj) + "'%T' does not support the buffer interface", w_obj) def readbuf_w(self, w_obj): # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) try: - return w_obj.readbuf_w(self) + return w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a readable buffer object") + "expected an object with a buffer interface") def writebuf_w(self, w_obj): # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) try: - return w_obj.writebuf_w(self) + return w_obj.buffer_w(self, self.BUF_WRITABLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a writeable buffer object") + "expected an object with a writable buffer interface") def charbuf_w(self, w_obj): # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) try: - return w_obj.charbuf_w(self) + buf = w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: raise oefmt(self.w_TypeError, - "expected a character buffer object") + "expected an object with a buffer interface") + else: + return buf.as_str() def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): @@ -1410,15 +1388,11 @@ code = 's*' if code == 's*': if self.isinstance_w(w_obj, self.w_str): - return w_obj.readbuf_w(self) + return StringBuffer(w_obj.bytes_w(self)) if self.isinstance_w(w_obj, self.w_unicode): return StringBuffer(w_obj.identifier_w(self)) try: - return w_obj.buffer_w(self, 0) - except TypeError: - pass - try: - return w_obj.readbuf_w(self) + return w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: self._getarg_error("bytes or buffer", w_obj) elif code == 's#': @@ -1427,7 +1401,7 @@ if self.isinstance_w(w_obj, self.w_unicode): return w_obj.identifier_w(self) try: - return w_obj.readbuf_w(self).as_str() + return 
w_obj.buffer_w(self, self.BUF_SIMPLE).as_str() except TypeError: self._getarg_error("bytes or read-only buffer", w_obj) elif code == 'w*': @@ -1435,13 +1409,15 @@ try: return w_obj.buffer_w(self, self.BUF_WRITABLE) except OperationError: - self._getarg_error("read-write buffer", w_obj) + pass except TypeError: pass + self._getarg_error("read-write buffer", w_obj) + elif code == 'y*': try: - return w_obj.writebuf_w(self) + return w_obj.buffer_w(self, self.BUF_SIMPLE) except TypeError: - self._getarg_error("read-write buffer", w_obj) + self._getarg_error("bytes or buffer", w_obj) else: assert False @@ -1463,28 +1439,11 @@ try: buf = w_obj.buffer_w(self, 0) except TypeError: - pass - else: - return buf.as_str() - try: - buf = w_obj.readbuf_w(self) - except TypeError: raise oefmt(self.w_TypeError, "'%T' does not support the buffer interface", w_obj) else: return buf.as_str() - def bufferstr_or_u_w(self, w_obj): - """Returns an interp-level str, directly if possible. - - Accepts unicode or any type supporting the buffer - interface. 
Unicode objects will be encoded to the default - encoding (UTF-8) - """ - if self.isinstance_w(w_obj, self.w_unicode): - return w_obj.identifier_w(self) - return self.bufferstr_w(w_obj) - def str_or_None_w(self, w_obj): if self.is_w(w_obj, self.w_None): return None diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -129,9 +129,6 @@ def visit_bufferstr(self, el, app_sig): self.checked_space_method(el, app_sig) - def visit_bufferstr_or_u(self, el, app_sig): - self.checked_space_method(el, app_sig) - def visit_str_or_None(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -251,9 +248,6 @@ def visit_bufferstr(self, typ): self.run_args.append("space.bufferstr_w(%s)" % (self.scopenext(),)) - def visit_bufferstr_or_u(self, typ): - self.run_args.append("space.bufferstr_or_u_w(%s)" % (self.scopenext(),)) - def visit_str_or_None(self, typ): self.run_args.append("space.str_or_None_w(%s)" % (self.scopenext(),)) @@ -397,9 +391,6 @@ def visit_bufferstr(self, typ): self.unwrap.append("space.bufferstr_w(%s)" % (self.nextarg(),)) - def visit_bufferstr_or_u(self, typ): - self.unwrap.append("space.bufferstr_or_u_w(%s)" % (self.nextarg(),)) - def visit_str_or_None(self, typ): self.unwrap.append("space.str_or_None_w(%s)" % (self.nextarg(),)) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -769,9 +769,9 @@ return -1 return space.int_w(w_code) - at unwrap_spec(string='bufferstr_or_u', errors='str_or_None', - w_final=WrappedDefault(False)) -def unicode_escape_decode(space, string, errors="strict", w_final=None): + at unwrap_spec(errors='str_or_None', w_final=WrappedDefault(False)) +def unicode_escape_decode(space, w_string, errors="strict", w_final=None): + string = space.getarg_w('s*', w_string).as_str() if errors is None: errors = 'strict' final = 
space.is_true(w_final) @@ -789,9 +789,9 @@ # ____________________________________________________________ # Raw Unicode escape (accepts bytes or str) - at unwrap_spec(string='bufferstr_or_u', errors='str_or_None', - w_final=WrappedDefault(False)) -def raw_unicode_escape_decode(space, string, errors="strict", w_final=None): + at unwrap_spec(errors='str_or_None', w_final=WrappedDefault(False)) +def raw_unicode_escape_decode(space, w_string, errors="strict", w_final=None): + string = space.getarg_w('s*', w_string).as_str() if errors is None: errors = 'strict' final = space.is_true(w_final) @@ -828,14 +828,16 @@ # support for the "string escape" translation # This is a bytes-to bytes transformation - at unwrap_spec(data="bufferstr", errors='str_or_None') -def escape_encode(space, data, errors='strict'): + at unwrap_spec(errors='str_or_None') +def escape_encode(space, w_data, errors='strict'): + data = space.bytes_w(w_data) from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, False) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) - at unwrap_spec(data='bufferstr_or_u', errors='str_or_None') -def escape_decode(space, data, errors='strict'): + at unwrap_spec(errors='str_or_None') +def escape_decode(space, w_data, errors='strict'): + data = space.getarg_w('s#', w_data) from pypy.interpreter.pyparser.parsestring import PyString_DecodeEscape result = PyString_DecodeEscape(space, data, errors, None) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -710,7 +710,7 @@ def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") - data = space.bufferstr_w(w_data) + data = space.getarg_w('y*', w_data).as_str() size = len(data) with self.lock: diff --git 
a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -340,7 +340,7 @@ def write_w(self, space, w_data): self._check_closed(space) self._check_writable(space) - data = space.bufferstr_w(w_data) + data = space.getarg_w('y*', w_data).as_str() try: n = os.write(self.fd, data) diff --git a/pypy/module/_io/test/test_bytesio.py b/pypy/module/_io/test/test_bytesio.py --- a/pypy/module/_io/test/test_bytesio.py +++ b/pypy/module/_io/test/test_bytesio.py @@ -44,7 +44,7 @@ assert f.write(b"") == 0 assert f.write(b"hello") == 5 exc = raises(TypeError, f.write, u"lo") - assert str(exc.value) == "'str' does not have the buffer interface" + assert str(exc.value) == "'str' does not support the buffer interface" import gc; gc.collect() assert f.getvalue() == b"hello" f.close() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -363,12 +363,6 @@ def buffer_w(self, space, flags): return RawFFIBuffer(self) - def readbuf_w(self, space): - return RawFFIBuffer(self) - - def writebuf_w(self, space): - return RawFFIBuffer(self) - def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -141,12 +141,6 @@ def buffer_w(self, space, flags): return ArrayBuffer(self, False) - def readbuf_w(self, space): - return ArrayBuffer(self, True) - - def writebuf_w(self, space): - return ArrayBuffer(self, False) - def descr_append(self, space, w_x): """ append(x) diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -466,9 +466,9 @@ # Unmarshaller with inlined buffer string def 
__init__(self, space, w_str): Unmarshaller.__init__(self, space, None) - self.bufstr = space.getarg_w('s#', w_str) + self.buf = space.getarg_w('y*', w_str) self.bufpos = 0 - self.limit = len(self.bufstr) + self.limit = self.buf.getlength() def raise_eof(self): space = self.space @@ -481,14 +481,14 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - return self.bufstr[pos : newpos] + return self.buf.getslice(pos, newpos, 1, newpos - pos) def get1(self): pos = self.bufpos if pos >= self.limit: self.raise_eof() self.bufpos = pos + 1 - return self.bufstr[pos] + return self.buf.getitem(pos) def get_int(self): pos = self.bufpos @@ -496,10 +496,10 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - a = ord(self.bufstr[pos]) - b = ord(self.bufstr[pos+1]) - c = ord(self.bufstr[pos+2]) - d = ord(self.bufstr[pos+3]) + a = ord(self.buf.getitem(pos)) + b = ord(self.buf.getitem(pos+1)) + c = ord(self.buf.getitem(pos+2)) + d = ord(self.buf.getitem(pos+3)) if d & 0x80: d -= 0x100 x = a | (b<<8) | (c<<16) | (d<<24) @@ -511,10 +511,10 @@ if newpos > self.limit: self.raise_eof() self.bufpos = newpos - a = ord(self.bufstr[pos]) - b = ord(self.bufstr[pos+1]) - c = ord(self.bufstr[pos+2]) - d = ord(self.bufstr[pos+3]) + a = ord(self.buf.getitem(pos)) + b = ord(self.buf.getitem(pos+1)) + c = ord(self.buf.getitem(pos+2)) + d = ord(self.buf.getitem(pos+3)) x = a | (b<<8) | (c<<16) | (d<<24) if x >= 0: return x diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -340,12 +340,6 @@ def buffer_w(self, space, flags): return self.descr_ravel(space).buffer_w(space, flags) - def readbuf_w(self, space): - return self.descr_ravel(space).readbuf_w(space) - - def charbuf_w(self, space): - return self.descr_ravel(space).charbuf_w(space) - def descr_byteswap(self, space): return self.get_dtype(space).itemtype.byteswap(self) diff --git a/pypy/module/micronumpy/ndarray.py 
b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -621,15 +621,6 @@ def buffer_w(self, space, flags): return self.implementation.get_buffer(space, True) - def readbuf_w(self, space): - return self.implementation.get_buffer(space, True) - - def writebuf_w(self, space): - return self.implementation.get_buffer(space, False) - - def charbuf_w(self, space): - return self.implementation.get_buffer(space, True).as_str() - def descr_get_data(self, space): return space.newbuffer(self.implementation.get_buffer(space, False)) diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -17,9 +17,9 @@ self.space = space self.mmap = mmap_obj - def readbuf_w(self, space): + def buffer_w(self, space, flags): self.check_valid() - return MMapBuffer(self.space, self.mmap, True) + return MMapBuffer(self.space, self.mmap, flags & space.BUF_WRITABLE) def close(self): self.mmap.close() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -31,15 +31,6 @@ def buffer_w(self, space, flags): return BytearrayBuffer(self.data, False) - def readbuf_w(self, space): - return BytearrayBuffer(self.data, True) - - def writebuf_w(self, space): - return BytearrayBuffer(self.data, False) - - def charbuf_w(self, space): - return ''.join(self.data) - def _new(self, value): return W_BytearrayObject(_make_data(value)) @@ -59,7 +50,8 @@ raise oefmt(space.w_IndexError, "bytearray index out of range") return space.wrap(ord(character)) - _val = charbuf_w + def _val(self, space): + return ''.join(self.data) @staticmethod def _op_val(space, w_other): diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -402,13 +402,6 @@ 
space.check_buf_flags(flags, True) return StringBuffer(self._value) - def readbuf_w(self, space): - return StringBuffer(self._value) - - def writebuf_w(self, space): - raise OperationError(space.w_TypeError, space.wrap( - "Cannot use string as modifiable buffer")) - def listview_int(self): return _create_list_from_bytes(self._value) @@ -440,7 +433,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return space.charbuf_w(w_other) + return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() def _chr(self, char): assert len(char) == 1 diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -39,9 +39,6 @@ def buffer_w(self, space, flags): return StringBuffer(self.force()) - def readbuf_w(self, space): - return StringBuffer(self.force()) - def descr_len(self, space): return space.wrap(self.length) From noreply at buildbot.pypy.org Mon May 5 22:47:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 5 May 2014 22:47:33 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: test/fix searchsorted return type for scalars Message-ID: <20140505204733.96FEE1C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71284:6296ec75007c Date: 2014-05-05 15:02 -0400 http://bitbucket.org/pypy/pypy/changeset/6296ec75007c/ Log: test/fix searchsorted return type for scalars (grafted from 3581f7a906c91c9c57ea024c4242546af0a37e3e) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -738,6 +738,8 @@ ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) + if ret.is_scalar(): + return ret.get_scalar_value() return ret def descr_setasflat(self, space, w_v): diff --git 
a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -351,13 +351,21 @@ assert (x.argsort(kind='m') == np.arange(32)).all() def test_searchsort(self): - from numpy import arange + import numpy as np import sys - a = arange(1, 6) + a = np.arange(1, 6) ret = a.searchsorted(3) assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array(3)) + assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array([3])) + assert ret == 2 + assert isinstance(ret, np.ndarray) ret = a.searchsorted(3, side='right') assert ret == 3 + assert isinstance(ret, np.generic) ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: From noreply at buildbot.pypy.org Mon May 5 22:47:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 22:47:34 +0200 (CEST) Subject: [pypy-commit] pypy default: typos (thanks Ryan) Message-ID: <20140505204734.C118B1C01DE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71285:c9c45912211f Date: 2014-05-05 23:46 +0300 http://bitbucket.org/pypy/pypy/changeset/c9c45912211f/ Log: typos (thanks Ryan) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -84,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix an RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -125,7 +125,7 @@ and scalars were corrected. We are slowly approaching our goal of passing the NumPy test suite. We still do not support object or unicode ndarrays. 
-* speed of iteration in dot() is now within 1.5x of the NumPy c +* Speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). Since the same array iterator is used throughout the ``_numpy`` module, speed increases should be apparent in all NumPy functionality. @@ -135,7 +135,7 @@ * A cffi-based ``numpy.random`` module is available as a branch; it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load +* Enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. From noreply at buildbot.pypy.org Mon May 5 22:47:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 5 May 2014 22:47:35 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: typos (thanks Ryan) Message-ID: <20140505204735.D819C1C01DE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71286:030cfa02b9af Date: 2014-05-05 23:46 +0300 http://bitbucket.org/pypy/pypy/changeset/030cfa02b9af/ Log: typos (thanks Ryan) diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -84,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix an RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -125,7 +125,7 @@ and scalars were corrected. We are slowly approaching our goal of passing the NumPy test suite. We still do not support object or unicode ndarrays. 
-* speed of iteration in dot() is now within 1.5x of the NumPy c +* Speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). Since the same array iterator is used throughout the ``_numpy`` module, speed increases should be apparent in all NumPy functionality. @@ -135,7 +135,7 @@ * A cffi-based ``numpy.random`` module is available as a branch; it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load +* Enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. From noreply at buildbot.pypy.org Mon May 5 23:10:20 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Mon, 5 May 2014 23:10:20 +0200 (CEST) Subject: [pypy-commit] pypy default: We now need -fPIC on PPC or translation will blow up with R_PPC_REL24 relocations that are out of range Message-ID: <20140505211020.E716C1C328C@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r71287:11c4878ea354 Date: 2014-05-05 23:09 +0200 http://bitbucket.org/pypy/pypy/changeset/11c4878ea354/ Log: We now need -fPIC on PPC or translation will blow up with R_PPC_REL24 relocations that are out of range diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -267,7 +267,7 @@ # Only required on armhf and mips{,el}, not armel. 
But there's no way to # detect armhf without shelling out if (platform.architecture()[0] == '64bit' - or platform.machine().startswith(('arm', 'mips'))): + or platform.machine().startswith(('arm', 'mips', 'ppc'))): host_factory = LinuxPIC else: host_factory = Linux From noreply at buildbot.pypy.org Tue May 6 00:18:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 00:18:59 +0200 (CEST) Subject: [pypy-commit] pypy py3k: kill long literal Message-ID: <20140505221859.76C541C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71288:345171b6d983 Date: 2014-05-05 12:10 -0700 http://bitbucket.org/pypy/pypy/changeset/345171b6d983/ Log: kill long literal diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -47,8 +47,6 @@ res = t.get_overload("staticAddOneToInt").call(None, 1) assert res == 2 - res = t.get_overload("staticAddOneToInt").call(None, 1L) - assert res == 2 res = t.get_overload("staticAddOneToInt").call(None, 1, 2) assert res == 4 res = t.get_overload("staticAddOneToInt").call(None, -1) From noreply at buildbot.pypy.org Tue May 6 00:19:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 00:19:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adjust expected per py3k Message-ID: <20140505221900.BD8F51C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71289:fc93c113016b Date: 2014-05-05 12:13 -0700 http://bitbucket.org/pypy/pypy/changeset/fc93c113016b/ Log: adjust expected per py3k diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py --- a/pypy/module/_lsprof/test/test_cprofile.py +++ b/pypy/module/_lsprof/test/test_cprofile.py @@ -38,7 +38,7 @@ prof.disable() stats = prof.getstats() expected = ( - "", + "", "", ) by_id = set() From noreply at buildbot.pypy.org Tue May 6 00:19:02 2014 From: 
noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 00:19:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt tests from default Message-ID: <20140505221902.04ADC1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71290:96a00ddec0cd Date: 2014-05-05 13:01 -0700 http://bitbucket.org/pypy/pypy/changeset/96a00ddec0cd/ Log: adapt tests from default diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -196,7 +196,7 @@ def test_bad_typecode(self): import marshal - exc = raises(ValueError, marshal.loads, chr(1)) + exc = raises(ValueError, marshal.loads, bytes([1])) assert str(exc.value) == "bad marshal data (unknown type code)" def test_bad_data(self): diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -528,7 +528,7 @@ f.close() - def test_buffer(self): + def test_memoryview(self): from mmap import mmap f = open(self.tmpname + "y", "bw+") f.write(b"foobar") @@ -542,18 +542,6 @@ m.close() f.close() - def test_memoryview(self): - from mmap import mmap - f = open(self.tmpname + "y", "w+") - f.write("foobar") - f.flush() - m = mmap(f.fileno(), 6) - m[5] = '?' 
- exc = raises(TypeError, memoryview, m) - assert 'buffer interface' in str(exc.value) - m.close() - f.close() - def test_offset(self): from mmap import mmap, ALLOCATIONGRANULARITY f = open(self.tmpname + "y", "wb+") From noreply at buildbot.pypy.org Tue May 6 00:19:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 00:19:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: use the new buffer interface, adapt tests Message-ID: <20140505221903.6C0931C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71291:7549430fa354 Date: 2014-05-05 13:01 -0700 http://bitbucket.org/pypy/pypy/changeset/7549430fa354/ Log: use the new buffer interface, adapt tests diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -92,11 +92,7 @@ @unwrap_spec(format=str, offset=int) def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) - buf = space.getarg_w('z*', w_buffer) - if buf is None: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, "unpack_from requires a buffer argument") + buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) if offset < 0: offset += buf.getlength() if offset < 0 or (buf.getlength() - offset) < size: diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -354,32 +354,33 @@ def test_pack_unpack_buffer(self): import array - b = array.array('c', '\x00' * 19) + b = array.array('b', b'\x00' * 19) sz = self.struct.calcsize("ii") for offset in [2, -17]: self.struct.pack_into("ii", b, offset, 17, 42) - assert str(buffer(b)) == ('\x00' * 2 + - self.struct.pack("ii", 17, 42) + - '\x00' * (19-sz-2)) - exc = raises(TypeError, self.struct.pack_into, "ii", buffer(b), 0, 17, 42) - assert 
str(exc.value) == "buffer is read-only" + assert bytes(memoryview(b)) == (b'\x00' * 2 + + self.struct.pack("ii", 17, 42) + + b'\x00' * (19-sz-2)) + b2 = array.array('b', b'\x00' * 19) + self.struct.pack_into("ii", memoryview(b2), 0, 17, 42) + assert bytes(b2) == self.struct.pack("ii", 17, 42) + (b'\x00' * 11) + exc = raises(TypeError, self.struct.pack_into, "ii", 'test', 0, 17, 42) - assert str(exc.value) == "Cannot use string as modifiable buffer" + assert str(exc.value) == "expected an object with a writable buffer interface" exc = raises(self.struct.error, self.struct.pack_into, "ii", b[0:1], 0, 17, 42) assert str(exc.value) == "pack_into requires a buffer of at least 8 bytes" assert self.struct.unpack_from("ii", b, 2) == (17, 42) assert self.struct.unpack_from("ii", b, -17) == (17, 42) - assert self.struct.unpack_from("ii", buffer(b, 2)) == (17, 42) - assert self.struct.unpack_from("ii", buffer(b), 2) == (17, 42) - assert self.struct.unpack_from("ii", memoryview(buffer(b)), 2) == (17, 42) + assert self.struct.unpack_from("ii", memoryview(b)[2:]) == (17, 42) + assert self.struct.unpack_from("ii", memoryview(b), 2) == (17, 42) exc = raises(TypeError, self.struct.unpack_from, "ii", 123) - assert 'must be string or buffer, not int' in str(exc.value) - exc = raises(self.struct.error, self.struct.unpack_from, "ii", None) - assert str(exc.value) == "unpack_from requires a buffer argument" - exc = raises(self.struct.error, self.struct.unpack_from, "ii", '') + assert str(exc.value) == "'int' does not support the buffer interface" + exc = raises(TypeError, self.struct.unpack_from, "ii", None) + assert str(exc.value) == "'NoneType' does not support the buffer interface" + exc = raises(self.struct.error, self.struct.unpack_from, "ii", b'') assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" - exc = raises(self.struct.error, self.struct.unpack_from, "ii", memoryview('')) + exc = raises(self.struct.error, self.struct.unpack_from, "ii", 
memoryview(b'')) assert str(exc.value) == "unpack_from requires a buffer of at least 8 bytes" def test___float__(self): From noreply at buildbot.pypy.org Tue May 6 02:16:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 02:16:14 +0200 (CEST) Subject: [pypy-commit] pypy py3k: skip this for now as it's also a problem on CPython, albeit not as easy to Message-ID: <20140506001614.69A9F1C3569@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71292:0177e2f21337 Date: 2014-05-05 17:15 -0700 http://bitbucket.org/pypy/pypy/changeset/0177e2f21337/ Log: skip this for now as it's also a problem on CPython, albeit not as easy to reproduce on there diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -1174,7 +1174,9 @@ yield Object(v), v finally: it = None # should commit all removals - self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) + if not support.check_impl_detail(pypy=True): + # XXX: http://bugs.python.org/issue21173 + self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) def test_weak_values_destroy_while_iterating(self): # Issue #7105: iterators shouldn't crash when a key is implicitly removed From noreply at buildbot.pypy.org Tue May 6 02:33:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 02:33:49 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix memoryview.readonly degrading to int from bool Message-ID: <20140506003349.E1E431D2371@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71293:59dc90f4fe8a Date: 2014-05-05 17:33 -0700 http://bitbucket.org/pypy/pypy/changeset/59dc90f4fe8a/ Log: fix memoryview.readonly degrading to int from bool diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -19,7 +19,8 @@ def 
buffer_w(self, space, flags): self.check_valid() - return MMapBuffer(self.space, self.mmap, flags & space.BUF_WRITABLE) + return MMapBuffer(self.space, self.mmap, + bool(flags & space.BUF_WRITABLE)) def close(self): self.mmap.close() diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -536,6 +536,7 @@ m = mmap(f.fileno(), 6) b = memoryview(m) assert len(b) == 6 + assert b.readonly is False assert b[3] == b"b" assert b[:] == b"foobar" del b # For CPython: "exported pointers exist" From noreply at buildbot.pypy.org Tue May 6 03:34:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:34:49 +0200 (CEST) Subject: [pypy-commit] pypy default: fill in missing module names Message-ID: <20140506013449.E03411D236E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71294:fb04fb1b644b Date: 2014-05-05 18:33 -0700 http://bitbucket.org/pypy/pypy/changeset/fb04fb1b644b/ Log: fill in missing module names diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -152,7 +152,7 @@ space.call_method(self.getdict(space), "update", w_dict) W_BytesIO.typedef = TypeDef( - 'BytesIO', W_BufferedIOBase.typedef, + '_io.BytesIO', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BytesIO), __init__ = interp2app(W_BytesIO.descr_init), diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -429,7 +429,7 @@ return w_size W_FileIO.typedef = TypeDef( - 'FileIO', W_RawIOBase.typedef, + '_io.FileIO', W_RawIOBase.typedef, __new__ = interp2app(W_FileIO.descr_new.im_func), __init__ = interp2app(W_FileIO.descr_init), __repr__ = interp2app(W_FileIO.repr_w), diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py 
--- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,9 +27,9 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - 'BlockingIOError', W_IOError.typedef, - __doc__ = ("Exception raised when I/O would block " - "on a non-blocking I/O stream"), + '_io.BlockingIOError', W_IOError.typedef, + __doc__ = ("Exception raised when I/O would block on a non-blocking " + "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), __init__ = interp2app(W_BlockingIOError.descr_init), characters_written = interp_attrproperty('written', W_BlockingIOError), diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -288,7 +288,7 @@ break W_IOBase.typedef = TypeDef( - '_IOBase', + '_io._IOBase', __new__ = generic_new_descr(W_IOBase), __enter__ = interp2app(W_IOBase.enter_w), __exit__ = interp2app(W_IOBase.exit_w), @@ -359,7 +359,7 @@ return space.wrap(builder.build()) W_RawIOBase.typedef = TypeDef( - '_RawIOBase', W_IOBase.typedef, + '_io._RawIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_RawIOBase), read = interp2app(W_RawIOBase.read_w), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -178,7 +178,7 @@ space.call_method(self.w_decoder, "setstate", w_state) W_IncrementalNewlineDecoder.typedef = TypeDef( - 'IncrementalNewlineDecoder', + '_io.IncrementalNewlineDecoder', __new__ = generic_new_descr(W_IncrementalNewlineDecoder), __init__ = interp2app(W_IncrementalNewlineDecoder.descr_init), @@ -255,7 +255,7 @@ W_TextIOBase.typedef = TypeDef( - '_TextIOBase', W_IOBase.typedef, + '_io._TextIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_TextIOBase), read = interp2app(W_TextIOBase.read_w), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ 
b/pypy/module/_io/test/test_io.py @@ -340,3 +340,9 @@ assert res == "world\n" assert f.newlines == "\n" assert type(f.newlines) is unicode + + def test_mod(self): + import _io + typemods = dict((t, t.__module__) for t in vars(_io).values() + if isinstance(t, type)) + assert all(mod in ('io', '_io') for mod in typemods.values()), typemods From noreply at buildbot.pypy.org Tue May 6 03:34:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:34:51 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: fill in missing module names Message-ID: <20140506013451.182BF1D236E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: release-2.3.x Changeset: r71295:59cfc1e32628 Date: 2014-05-05 18:33 -0700 http://bitbucket.org/pypy/pypy/changeset/59cfc1e32628/ Log: fill in missing module names (grafted from fb04fb1b644b7d56c9affdead41f3704be5495ee) diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -152,7 +152,7 @@ space.call_method(self.getdict(space), "update", w_dict) W_BytesIO.typedef = TypeDef( - 'BytesIO', W_BufferedIOBase.typedef, + '_io.BytesIO', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BytesIO), __init__ = interp2app(W_BytesIO.descr_init), diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -429,7 +429,7 @@ return w_size W_FileIO.typedef = TypeDef( - 'FileIO', W_RawIOBase.typedef, + '_io.FileIO', W_RawIOBase.typedef, __new__ = interp2app(W_FileIO.descr_new.im_func), __init__ = interp2app(W_FileIO.descr_init), __repr__ = interp2app(W_FileIO.repr_w), diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,9 +27,9 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - 'BlockingIOError', W_IOError.typedef, 
- __doc__ = ("Exception raised when I/O would block " - "on a non-blocking I/O stream"), + '_io.BlockingIOError', W_IOError.typedef, + __doc__ = ("Exception raised when I/O would block on a non-blocking " + "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), __init__ = interp2app(W_BlockingIOError.descr_init), characters_written = interp_attrproperty('written', W_BlockingIOError), diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -288,7 +288,7 @@ break W_IOBase.typedef = TypeDef( - '_IOBase', + '_io._IOBase', __new__ = generic_new_descr(W_IOBase), __enter__ = interp2app(W_IOBase.enter_w), __exit__ = interp2app(W_IOBase.exit_w), @@ -359,7 +359,7 @@ return space.wrap(builder.build()) W_RawIOBase.typedef = TypeDef( - '_RawIOBase', W_IOBase.typedef, + '_io._RawIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_RawIOBase), read = interp2app(W_RawIOBase.read_w), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -178,7 +178,7 @@ space.call_method(self.w_decoder, "setstate", w_state) W_IncrementalNewlineDecoder.typedef = TypeDef( - 'IncrementalNewlineDecoder', + '_io.IncrementalNewlineDecoder', __new__ = generic_new_descr(W_IncrementalNewlineDecoder), __init__ = interp2app(W_IncrementalNewlineDecoder.descr_init), @@ -255,7 +255,7 @@ W_TextIOBase.typedef = TypeDef( - '_TextIOBase', W_IOBase.typedef, + '_io._TextIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_TextIOBase), read = interp2app(W_TextIOBase.read_w), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -340,3 +340,9 @@ assert res == "world\n" assert f.newlines == "\n" assert type(f.newlines) is unicode + + def test_mod(self): + import _io + typemods = dict((t, 
t.__module__) for t in vars(_io).values() + if isinstance(t, type)) + assert all(mod in ('io', '_io') for mod in typemods.values()), typemods From noreply at buildbot.pypy.org Tue May 6 03:44:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:44:29 +0200 (CEST) Subject: [pypy-commit] pypy default: forgot to kill this __module__ Message-ID: <20140506014429.8FA6F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71296:3a5da776e68e Date: 2014-05-05 18:42 -0700 http://bitbucket.org/pypy/pypy/changeset/3a5da776e68e/ Log: forgot to kill this __module__ diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,7 +442,6 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', - __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __nonzero__ = interp2app(W_CData.nonzero), From noreply at buildbot.pypy.org Tue May 6 03:44:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:44:30 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: forgot to kill this __module__ Message-ID: <20140506014430.B495F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: release-2.3.x Changeset: r71297:530e5d850319 Date: 2014-05-05 18:42 -0700 http://bitbucket.org/pypy/pypy/changeset/530e5d850319/ Log: forgot to kill this __module__ (grafted from 3a5da776e68eb60eec90e01935b74f4d39056d51) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,7 +442,6 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', - __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __nonzero__ = interp2app(W_CData.nonzero), From noreply at buildbot.pypy.org Tue May 6 03:44:32 2014 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Tue, 6 May 2014 03:44:32 +0200 (CEST) Subject: [pypy-commit] pypy py3k: disallow unicode Message-ID: <20140506014432.033BB1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71298:05a472020c13 Date: 2014-05-05 18:34 -0700 http://bitbucket.org/pypy/pypy/changeset/05a472020c13/ Log: disallow unicode diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -136,7 +136,7 @@ def write(space, fd, w_data): """Write a string to a file descriptor. Return the number of bytes actually written, which may be smaller than len(data).""" - data = space.getarg_w('s*', w_data) + data = space.getarg_w('y*', w_data) try: res = os.write(fd, data.as_str()) except OSError, e: From noreply at buildbot.pypy.org Tue May 6 03:44:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:44:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140506014433.4D57C1C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71299:d8096ef0c138 Date: 2014-05-05 18:38 -0700 http://bitbucket.org/pypy/pypy/changeset/d8096ef0c138/ Log: merge default diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -84,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix an RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -125,7 +125,7 @@ and scalars were corrected. We are slowly approaching our goal of passing the NumPy test suite. We still do not support object or unicode ndarrays. 
-* speed of iteration in dot() is now within 1.5x of the NumPy c +* Speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). Since the same array iterator is used throughout the ``_numpy`` module, speed increases should be apparent in all NumPy functionality. @@ -135,7 +135,7 @@ * A cffi-based ``numpy.random`` module is available as a branch; it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load +* Enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -199,8 +199,7 @@ space.call_method(self.getdict(space), "update", w_dict) W_BytesIO.typedef = TypeDef( - 'BytesIO', W_BufferedIOBase.typedef, - __module__ = "_io", + '_io.BytesIO', W_BufferedIOBase.typedef, __new__ = interp2app(W_BytesIO.descr_new.im_func), __init__ = interp2app(W_BytesIO.descr_init), diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -436,8 +436,7 @@ return w_size W_FileIO.typedef = TypeDef( - 'FileIO', W_RawIOBase.typedef, - __module__ = "_io", + '_io.FileIO', W_RawIOBase.typedef, __new__ = interp2app(W_FileIO.descr_new.im_func), __init__ = interp2app(W_FileIO.descr_init), __repr__ = interp2app(W_FileIO.repr_w), diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,10 +27,9 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - 'BlockingIOError', 
W_IOError.typedef, - __module__ = 'io', - __doc__ = ("Exception raised when I/O would block " - "on a non-blocking I/O stream"), + '_io.BlockingIOError', W_IOError.typedef, + __doc__ = ("Exception raised when I/O would block on a non-blocking " + "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), __init__ = interp2app(W_BlockingIOError.descr_init), characters_written = interp_attrproperty('written', W_BlockingIOError), diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -299,8 +299,7 @@ break W_IOBase.typedef = TypeDef( - '_IOBase', - __module__ = "_io", + '_io._IOBase', __new__ = generic_new_descr(W_IOBase), __enter__ = interp2app(W_IOBase.enter_w), __exit__ = interp2app(W_IOBase.exit_w), @@ -372,8 +371,7 @@ return space.wrapbytes(builder.build()) W_RawIOBase.typedef = TypeDef( - '_RawIOBase', W_IOBase.typedef, - __module__ = "_io", + '_io._RawIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_RawIOBase), read = interp2app(W_RawIOBase.read_w), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -178,8 +178,7 @@ space.call_method(self.w_decoder, "setstate", w_state) W_IncrementalNewlineDecoder.typedef = TypeDef( - 'IncrementalNewlineDecoder', - __module__ = "_io", + '_io.IncrementalNewlineDecoder', __new__ = generic_new_descr(W_IncrementalNewlineDecoder), __init__ = interp2app(W_IncrementalNewlineDecoder.descr_init), @@ -256,8 +255,7 @@ W_TextIOBase.typedef = TypeDef( - '_TextIOBase', W_IOBase.typedef, - __module__ = "_io", + '_io._TextIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_TextIOBase), read = interp2app(W_TextIOBase.read_w), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -381,5 +381,6 @@ def 
test_mod(self): import _io - assert all(t.__module__ in ('io', '_io') for t in vars(_io).values() - if isinstance(t, type)) + typemods = dict((t, t.__module__) for t in vars(_io).values() + if isinstance(t, type)) + assert all(mod in ('io', '_io') for mod in typemods.values()), typemods diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -729,6 +729,8 @@ ret = W_NDimArray.from_shape( space, v.get_shape(), descriptor.get_dtype_cache(space).w_longdtype) app_searchsort(space, self, v, space.wrap(side), ret) + if ret.is_scalar(): + return ret.get_scalar_value() return ret def descr_setasflat(self, space, w_v): diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -351,13 +351,21 @@ assert (x.argsort(kind='m') == np.arange(32)).all() def test_searchsort(self): - from numpy import arange + import numpy as np import sys - a = arange(1, 6) + a = np.arange(1, 6) ret = a.searchsorted(3) assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array(3)) + assert ret == 2 + assert isinstance(ret, np.generic) + ret = a.searchsorted(np.array([3])) + assert ret == 2 + assert isinstance(ret, np.ndarray) ret = a.searchsorted(3, side='right') assert ret == 3 + assert isinstance(ret, np.generic) ret = a.searchsorted([-10, 10, 2, 3]) assert (ret == [0, 5, 1, 2]).all() if '__pypy__' in sys.builtin_module_names: diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -267,7 +267,7 @@ # Only required on armhf and mips{,el}, not armel. 
But there's no way to # detect armhf without shelling out if (platform.architecture()[0] == '64bit' - or platform.machine().startswith(('arm', 'mips'))): + or platform.machine().startswith(('arm', 'mips', 'ppc'))): host_factory = LinuxPIC else: host_factory = Linux From noreply at buildbot.pypy.org Tue May 6 03:44:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:44:34 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140506014434.80F231C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71300:9c2e85c01eb9 Date: 2014-05-05 18:43 -0700 http://bitbucket.org/pypy/pypy/changeset/9c2e85c01eb9/ Log: merge default diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,7 +442,6 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', - __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __bool__ = interp2app(W_CData.bool), From noreply at buildbot.pypy.org Tue May 6 03:44:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 6 May 2014 03:44:35 +0200 (CEST) Subject: [pypy-commit] pypy py3k: kill more __module__s Message-ID: <20140506014435.A3CA11C01DE@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71301:6b34e576e6be Date: 2014-05-05 18:43 -0700 http://bitbucket.org/pypy/pypy/changeset/6b34e576e6be/ Log: kill more __module__s diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -97,8 +97,7 @@ space.w_DeprecationWarning) W_BufferedIOBase.typedef = TypeDef( - '_BufferedIOBase', W_IOBase.typedef, - __module__ = "_io", + '_io._BufferedIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_BufferedIOBase), read = interp2app(W_BufferedIOBase.read_w), read1 = 
interp2app(W_BufferedIOBase.read1_w), @@ -993,8 +992,7 @@ 'isatty']) W_BufferedRWPair.typedef = TypeDef( - 'BufferedRWPair', W_BufferedIOBase.typedef, - __module__ = "_io", + '_io.BufferedRWPair', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BufferedRWPair), __init__ = interp2app(W_BufferedRWPair.descr_init), __getstate__ = interp2app(W_BufferedRWPair.getstate_w), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -251,7 +251,6 @@ W_FilterFalse.typedef = TypeDef( 'itertools.ifilterfalse', - __module__ = 'itertools', __new__ = interp2app(W_FilterFalse___new__), __iter__ = interp2app(W_FilterFalse.iter_w), __next__ = interp2app(W_FilterFalse.next_w), @@ -492,7 +491,6 @@ W_ZipLongest.typedef = TypeDef( 'itertools.zip_longest', - __module__ = 'itertools', __new__ = interp2app(W_ZipLongest___new__), __iter__ = interp2app(W_ZipLongest.iter_w), __next__ = interp2app(W_ZipLongest.next_w), From noreply at buildbot.pypy.org Tue May 6 04:11:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:33 +0200 (CEST) Subject: [pypy-commit] pypy default: oops, fix test_pypy_c.test_buffers Message-ID: <20140506021133.35E331C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71302:a7a4b3c9936f Date: 2014-05-05 19:45 -0400 http://bitbucket.org/pypy/pypy/changeset/a7a4b3c9936f/ Log: oops, fix test_pypy_c.test_buffers diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -56,7 +56,7 @@ guard_false(i99, descr=...) i100 = int_lshift(i98, 24) i101 = int_or(i97, i100) - i102 = getfield_raw(50657056, descr=) + i102 = getfield_raw(\d+, descr=) i103 = int_lt(i102, 0) guard_false(i103, descr=...) 
""") From noreply at buildbot.pypy.org Tue May 6 04:11:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:34 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify struct.Struct methods now that everything lives at interp level Message-ID: <20140506021134.6C8441C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71303:0928de078261 Date: 2014-05-05 17:25 -0400 http://bitbucket.org/pypy/pypy/changeset/0928de078261/ Log: simplify struct.Struct methods now that everything lives at interp level diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -2,7 +2,6 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator -from rpython.tool.sourcetools import func_with_new_name from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -51,9 +50,9 @@ # XXX inefficient @unwrap_spec(format=str, offset=int) -def pack_into(space, format, w_buf, offset, args_w): +def pack_into(space, format, w_buffer, offset, args_w): res = pack(space, format, args_w).str_w(space) - buf = space.writebuf_w(w_buf) + buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() size = len(res) @@ -118,21 +117,19 @@ W_Struct.__init__(self, space, format) return self - def wrap_struct_method(name): - def impl(self, space, __args__): - w_module = space.getbuiltinmodule('struct') - w_method = space.getattr(w_module, space.wrap(name)) - return space.call_obj_args( - w_method, space.wrap(self.format), __args__ - ) + def descr_pack(self, space, args_w): + return pack(space, self.format, args_w) - return func_with_new_name(impl, 'descr_' + name) + @unwrap_spec(offset=int) + def descr_pack_into(self, space, w_buffer, offset, args_w): + return 
pack_into(space, self.format, w_buffer, offset, args_w) - descr_pack = wrap_struct_method("pack") - descr_unpack = wrap_struct_method("unpack") - descr_pack_into = wrap_struct_method("pack_into") - descr_unpack_from = wrap_struct_method("unpack_from") + def descr_unpack(self, space, w_str): + return unpack(space, self.format, w_str) + @unwrap_spec(offset=int) + def descr_unpack_from(self, space, w_buffer, offset=0): + return unpack_from(space, self.format, w_buffer, offset) W_Struct.typedef = TypeDef("Struct", __new__=interp2app(W_Struct.descr__new__.im_func), diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -403,6 +403,11 @@ assert type(obj2) is float assert obj2 == 42.3 + def test_struct_object(self): + s = self.struct.Struct('i') + assert s.unpack(s.pack(42)) == (42,) + assert s.unpack_from(memoryview(s.pack(42))) == (42,) + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) From noreply at buildbot.pypy.org Tue May 6 04:11:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:35 +0200 (CEST) Subject: [pypy-commit] pypy default: add (skipped) struct object test_pypy_c Message-ID: <20140506021135.930AD1C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71304:b9a02124b908 Date: 2014-05-05 18:36 -0400 http://bitbucket.org/pypy/pypy/changeset/b9a02124b908/ Log: add (skipped) struct object test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -348,51 +348,6 @@ loop, = log.loops_by_id("globalread", is_entry_bridge=True) assert len(loop.ops_by_id("globalread")) == 0 - def test_struct_module(self): - def main(): - import struct - i = 1 - while i < 1000: - x = struct.unpack("i", 
struct.pack("i", i))[0] # ID: struct - i += x / i - return i - - log = self.run(main) - assert log.result == main() - - loop, = log.loops_by_id("struct") - if sys.maxint == 2 ** 63 - 1: - extra = """ - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - """ - else: - extra = "" - # This could, of course stand some improvement, to remove all these - # arithmatic ops, but we've removed all the core overhead. - assert loop.match_by_id("struct", """ - guard_not_invalidated(descr=...) - # struct.pack - %(32_bit_only)s - i11 = int_and(i4, 255) - i13 = int_rshift(i4, 8) - i14 = int_and(i13, 255) - i16 = int_rshift(i13, 8) - i17 = int_and(i16, 255) - i19 = int_rshift(i16, 8) - i20 = int_and(i19, 255) - - # struct.unpack - i22 = int_lshift(i14, 8) - i23 = int_or(i11, i22) - i25 = int_lshift(i17, 16) - i26 = int_or(i23, i25) - i28 = int_ge(i20, 128) - guard_false(i28, descr=...) - i30 = int_lshift(i20, 24) - i31 = int_or(i26, i30) - """ % {"32_bit_only": extra}) - def test_eval(self): def main(): i = 1 diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -0,0 +1,85 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestStruct(BaseTestPyPyC): + def test_struct_function(self): + def main(n): + import struct + i = 1 + while i < n: + x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + # This could, of course stand some improvement, to remove all these + # arithmatic ops, but we've removed all the core overhead. + assert loop.match_by_id("struct", """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) 
+ i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) + i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) + + def test_struct_object(self): + skip("XXX broken") + def main(n): + import struct + s = struct.Struct("i") + i = 1 + while i < n: + x = s.unpack(s.pack(i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('struct', """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) 
+ i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) From noreply at buildbot.pypy.org Tue May 6 04:11:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:36 +0200 (CEST) Subject: [pypy-commit] pypy default: move _struct.error to interp level, test/fix its __module__ Message-ID: <20140506021136.BF45D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71305:40142188f76a Date: 2014-05-05 17:24 -0400 http://bitbucket.org/pypy/pypy/changeset/40142188f76a/ Log: move _struct.error to interp level, test/fix its __module__ diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -46,6 +46,8 @@ The variable struct.error is an exception raised on errors.""" interpleveldefs = { + 'error': 'interp_struct.get_error(space)', + 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'pack_into': 'interp_struct.pack_into', @@ -56,5 +58,4 @@ } appleveldefs = { - 'error': 'app_struct.error', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py deleted file mode 100644 --- a/pypy/module/struct/app_struct.py +++ /dev/null @@ -1,9 +0,0 @@ -# NOT_RPYTHON -""" -Application-level definitions for the struct module. 
-""" - - -class error(Exception): - """Exception raised on various occasions; argument is a string - describing what is wrong.""" diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -12,6 +12,15 @@ ) +class Cache: + def __init__(self, space): + self.error = space.new_exception_class("struct.error", space.w_Exception) + + +def get_error(space): + return space.fromcache(Cache).error + + @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) @@ -24,9 +33,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return fmtiter.totalsize @@ -42,9 +49,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.wrap(fmtiter.result.build()) @@ -57,9 +62,7 @@ offset += buf.getlength() size = len(res) if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "pack_into requires a buffer of at least %d bytes", size) buf.setslice(offset, res) @@ -72,9 +75,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, 
space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) @@ -89,15 +90,11 @@ size = _calcsize(space, format) buf = space.getarg_w('z*', w_buffer) if buf is None: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, "unpack_from requires a buffer argument") + raise oefmt(get_error(space), "unpack_from requires a buffer argument") if offset < 0: offset += buf.getlength() if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "unpack_from requires a buffer of at least %d bytes", size) buf = SubBuffer(buf, offset, size) diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -24,6 +24,10 @@ struct.error should be an exception class. 
""" assert issubclass(self.struct.error, Exception) + assert self.struct.error.__mro__ == (self.struct.error, Exception, + BaseException, object) + assert self.struct.error.__name__ == "error" + assert self.struct.error.__module__ == "struct" def test_calcsize_standard(self): """ From noreply at buildbot.pypy.org Tue May 6 04:11:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:37 +0200 (CEST) Subject: [pypy-commit] pypy default: promote format on struct.Struct objects Message-ID: <20140506021137.DD3F71C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71306:d93da947bbf6 Date: 2014-05-05 18:35 -0400 http://bitbucket.org/pypy/pypy/changeset/d93da947bbf6/ Log: promote format on struct.Struct objects diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -44,7 +44,6 @@ """) def test_struct_object(self): - skip("XXX broken") def main(n): import struct s = struct.Struct("i") diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -115,18 +115,18 @@ return self def descr_pack(self, space, args_w): - return pack(space, self.format, args_w) + return pack(space, jit.promote_string(self.format), args_w) @unwrap_spec(offset=int) def descr_pack_into(self, space, w_buffer, offset, args_w): - return pack_into(space, self.format, w_buffer, offset, args_w) + return pack_into(space, jit.promote_string(self.format), w_buffer, offset, args_w) def descr_unpack(self, space, w_str): - return unpack(space, self.format, w_str) + return unpack(space, jit.promote_string(self.format), w_str) @unwrap_spec(offset=int) def descr_unpack_from(self, space, w_buffer, offset=0): - return unpack_from(space, self.format, w_buffer, offset) + return unpack_from(space, 
jit.promote_string(self.format), w_buffer, offset) W_Struct.typedef = TypeDef("Struct", __new__=interp2app(W_Struct.descr__new__.im_func), From noreply at buildbot.pypy.org Tue May 6 04:11:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:39 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140506021139.1F8311C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71307:ad42ca1b0c3e Date: 2014-05-05 22:09 -0400 http://bitbucket.org/pypy/pypy/changeset/ad42ca1b0c3e/ Log: merge heads diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -84,7 +84,7 @@ * Fix issues with reimporting builtin modules -* Fix a RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port +* Fix an RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port * Support for corner cases on objects with __int__ and __float__ methods @@ -125,7 +125,7 @@ and scalars were corrected. We are slowly approaching our goal of passing the NumPy test suite. We still do not support object or unicode ndarrays. -* speed of iteration in dot() is now within 1.5x of the NumPy c +* Speed of iteration in dot() is now within 1.5x of the NumPy c implementation (without BLAS acceleration). Since the same array iterator is used throughout the ``_numpy`` module, speed increases should be apparent in all NumPy functionality. @@ -135,7 +135,7 @@ * A cffi-based ``numpy.random`` module is available as a branch; it will be merged soon after this release. -* enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load +* Enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load memory operations used in NumPy arrays. Further work remains here in virtualizing the alloc_raw_storage when possible. This will allow scalars to have storages but still be virtualized when possible in loops. 
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,7 +442,6 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', - __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __nonzero__ = interp2app(W_CData.nonzero), diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -152,7 +152,7 @@ space.call_method(self.getdict(space), "update", w_dict) W_BytesIO.typedef = TypeDef( - 'BytesIO', W_BufferedIOBase.typedef, + '_io.BytesIO', W_BufferedIOBase.typedef, __new__ = generic_new_descr(W_BytesIO), __init__ = interp2app(W_BytesIO.descr_init), diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -429,7 +429,7 @@ return w_size W_FileIO.typedef = TypeDef( - 'FileIO', W_RawIOBase.typedef, + '_io.FileIO', W_RawIOBase.typedef, __new__ = interp2app(W_FileIO.descr_new.im_func), __init__ = interp2app(W_FileIO.descr_init), __repr__ = interp2app(W_FileIO.repr_w), diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,9 +27,9 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - 'BlockingIOError', W_IOError.typedef, - __doc__ = ("Exception raised when I/O would block " - "on a non-blocking I/O stream"), + '_io.BlockingIOError', W_IOError.typedef, + __doc__ = ("Exception raised when I/O would block on a non-blocking " + "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), __init__ = interp2app(W_BlockingIOError.descr_init), characters_written = interp_attrproperty('written', W_BlockingIOError), diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- 
a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -288,7 +288,7 @@ break W_IOBase.typedef = TypeDef( - '_IOBase', + '_io._IOBase', __new__ = generic_new_descr(W_IOBase), __enter__ = interp2app(W_IOBase.enter_w), __exit__ = interp2app(W_IOBase.exit_w), @@ -359,7 +359,7 @@ return space.wrap(builder.build()) W_RawIOBase.typedef = TypeDef( - '_RawIOBase', W_IOBase.typedef, + '_io._RawIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_RawIOBase), read = interp2app(W_RawIOBase.read_w), diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -178,7 +178,7 @@ space.call_method(self.w_decoder, "setstate", w_state) W_IncrementalNewlineDecoder.typedef = TypeDef( - 'IncrementalNewlineDecoder', + '_io.IncrementalNewlineDecoder', __new__ = generic_new_descr(W_IncrementalNewlineDecoder), __init__ = interp2app(W_IncrementalNewlineDecoder.descr_init), @@ -255,7 +255,7 @@ W_TextIOBase.typedef = TypeDef( - '_TextIOBase', W_IOBase.typedef, + '_io._TextIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_TextIOBase), read = interp2app(W_TextIOBase.read_w), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -340,3 +340,9 @@ assert res == "world\n" assert f.newlines == "\n" assert type(f.newlines) is unicode + + def test_mod(self): + import _io + typemods = dict((t, t.__module__) for t in vars(_io).values() + if isinstance(t, type)) + assert all(mod in ('io', '_io') for mod in typemods.values()), typemods diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -267,7 +267,7 @@ # Only required on armhf and mips{,el}, not armel. 
But there's no way to # detect armhf without shelling out if (platform.architecture()[0] == '64bit' - or platform.machine().startswith(('arm', 'mips'))): + or platform.machine().startswith(('arm', 'mips', 'ppc'))): host_factory = LinuxPIC else: host_factory = Linux From noreply at buildbot.pypy.org Tue May 6 04:11:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:40 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 3a5da776e68e, this __module__ was here for a reason Message-ID: <20140506021140.3EB5D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71308:8d657ce17ec5 Date: 2014-05-05 22:10 -0400 http://bitbucket.org/pypy/pypy/changeset/8d657ce17ec5/ Log: backout 3a5da776e68e, this __module__ was here for a reason diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,6 +442,7 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', + __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __nonzero__ = interp2app(W_CData.nonzero), From noreply at buildbot.pypy.org Tue May 6 04:11:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:11:56 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: backout 3a5da776e68e, this __module__ was here for a reason Message-ID: <20140506021156.029AB1C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71309:7267908b2526 Date: 2014-05-05 22:10 -0400 http://bitbucket.org/pypy/pypy/changeset/7267908b2526/ Log: backout 3a5da776e68e, this __module__ was here for a reason diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,6 +442,7 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', + __module__ = '_cffi_backend', __name__ = 
'', __repr__ = interp2app(W_CData.repr), __nonzero__ = interp2app(W_CData.nonzero), From noreply at buildbot.pypy.org Tue May 6 04:21:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:21:16 +0200 (CEST) Subject: [pypy-commit] pypy default: properly test/fix _io __modules__s Message-ID: <20140506022116.CF31E1C3569@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71310:13f0fb1ddec1 Date: 2014-05-05 22:20 -0400 http://bitbucket.org/pypy/pypy/changeset/13f0fb1ddec1/ Log: properly test/fix _io __modules__s diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,7 +27,7 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - '_io.BlockingIOError', W_IOError.typedef, + 'BlockingIOError', W_IOError.typedef, __doc__ = ("Exception raised when I/O would block on a non-blocking " "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -345,4 +345,10 @@ import _io typemods = dict((t, t.__module__) for t in vars(_io).values() if isinstance(t, type)) - assert all(mod in ('io', '_io') for mod in typemods.values()), typemods + for t, mod in typemods.items(): + if t is _io.BlockingIOError: + assert mod == '__builtin__' + elif t is _io.UnsupportedOperation: + assert mod == 'io' + else: + assert mod == '_io' diff --git a/pypy/module/_io/test/test_stringio.py b/pypy/module/_io/test/test_stringio.py --- a/pypy/module/_io/test/test_stringio.py +++ b/pypy/module/_io/test/test_stringio.py @@ -146,11 +146,6 @@ exc_info = raises(TypeError, sio.write, 3) assert "int" in exc_info.value.args[0] - def test_module(self): - import io - - assert io.StringIO.__module__ == "_io" - def test_newline_none(self): import io From noreply at buildbot.pypy.org Tue May 6 04:21:41 2014 From: 
noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:21:41 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: properly test/fix _io __modules__s Message-ID: <20140506022141.14B8A1C3569@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71311:186c5a7009ed Date: 2014-05-05 22:20 -0400 http://bitbucket.org/pypy/pypy/changeset/186c5a7009ed/ Log: properly test/fix _io __modules__s diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,7 +27,7 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - '_io.BlockingIOError', W_IOError.typedef, + 'BlockingIOError', W_IOError.typedef, __doc__ = ("Exception raised when I/O would block on a non-blocking " "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -345,4 +345,10 @@ import _io typemods = dict((t, t.__module__) for t in vars(_io).values() if isinstance(t, type)) - assert all(mod in ('io', '_io') for mod in typemods.values()), typemods + for t, mod in typemods.items(): + if t is _io.BlockingIOError: + assert mod == '__builtin__' + elif t is _io.UnsupportedOperation: + assert mod == 'io' + else: + assert mod == '_io' diff --git a/pypy/module/_io/test/test_stringio.py b/pypy/module/_io/test/test_stringio.py --- a/pypy/module/_io/test/test_stringio.py +++ b/pypy/module/_io/test/test_stringio.py @@ -146,11 +146,6 @@ exc_info = raises(TypeError, sio.write, 3) assert "int" in exc_info.value.args[0] - def test_module(self): - import io - - assert io.StringIO.__module__ == "_io" - def test_newline_none(self): import io From noreply at buildbot.pypy.org Tue May 6 04:44:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 04:44:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default 
Message-ID: <20140506024405.2A28C1D23C3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71312:9401f74194b9 Date: 2014-05-05 22:43 -0400 http://bitbucket.org/pypy/pypy/changeset/9401f74194b9/ Log: merge default diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -442,6 +442,7 @@ W_CData.typedef = TypeDef( '_cffi_backend.CData', + __module__ = '_cffi_backend', __name__ = '', __repr__ = interp2app(W_CData.repr), __bool__ = interp2app(W_CData.bool), diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -27,7 +27,7 @@ self.written = written W_BlockingIOError.typedef = TypeDef( - '_io.BlockingIOError', W_IOError.typedef, + 'BlockingIOError', W_IOError.typedef, __doc__ = ("Exception raised when I/O would block on a non-blocking " "I/O stream"), __new__ = generic_new_descr(W_BlockingIOError), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -383,4 +383,10 @@ import _io typemods = dict((t, t.__module__) for t in vars(_io).values() if isinstance(t, type)) - assert all(mod in ('io', '_io') for mod in typemods.values()), typemods + for t, mod in typemods.items(): + if t is _io.BlockingIOError: + assert mod == '__builtin__' + elif t is _io.UnsupportedOperation: + assert mod == 'io' + else: + assert mod == '_io' diff --git a/pypy/module/_io/test/test_stringio.py b/pypy/module/_io/test/test_stringio.py --- a/pypy/module/_io/test/test_stringio.py +++ b/pypy/module/_io/test/test_stringio.py @@ -142,11 +142,6 @@ exc_info = raises(TypeError, sio.write, 3) assert "int" in exc_info.value.args[0] - def test_module(self): - import io - - assert io.StringIO.__module__ == "_io" - def test_newline_none(self): import io diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -56,7 +56,7 @@ guard_false(i99, descr=...) i100 = int_lshift(i98, 24) i101 = int_or(i97, i100) - i102 = getfield_raw(50657056, descr=) + i102 = getfield_raw(\d+, descr=) i103 = int_lt(i102, 0) guard_false(i103, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -348,51 +348,6 @@ loop, = log.loops_by_id("globalread", is_entry_bridge=True) assert len(loop.ops_by_id("globalread")) == 0 - def test_struct_module(self): - def main(): - import struct - i = 1 - while i < 1000: - x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct - i += x / i - return i - - log = self.run(main) - assert log.result == main() - - loop, = log.loops_by_id("struct") - if sys.maxint == 2 ** 63 - 1: - extra = """ - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - """ - else: - extra = "" - # This could, of course stand some improvement, to remove all these - # arithmatic ops, but we've removed all the core overhead. - assert loop.match_by_id("struct", """ - guard_not_invalidated(descr=...) - # struct.pack - %(32_bit_only)s - i11 = int_and(i4, 255) - i13 = int_rshift(i4, 8) - i14 = int_and(i13, 255) - i16 = int_rshift(i13, 8) - i17 = int_and(i16, 255) - i19 = int_rshift(i16, 8) - i20 = int_and(i19, 255) - - # struct.unpack - i22 = int_lshift(i14, 8) - i23 = int_or(i11, i22) - i25 = int_lshift(i17, 16) - i26 = int_or(i23, i25) - i28 = int_ge(i20, 128) - guard_false(i28, descr=...) 
- i30 = int_lshift(i20, 24) - i31 = int_or(i26, i30) - """ % {"32_bit_only": extra}) - def test_eval(self): def main(): i = 1 diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -0,0 +1,84 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestStruct(BaseTestPyPyC): + def test_struct_function(self): + def main(n): + import struct + i = 1 + while i < n: + x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + # This could, of course stand some improvement, to remove all these + # arithmatic ops, but we've removed all the core overhead. + assert loop.match_by_id("struct", """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) + i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) + + def test_struct_object(self): + def main(n): + import struct + s = struct.Struct("i") + i = 1 + while i < n: + x = s.unpack(s.pack(i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('struct', """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) 
+ i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) + i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -48,6 +48,8 @@ applevel_name = '_struct' interpleveldefs = { + 'error': 'interp_struct.get_error(space)', + 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'pack_into': 'interp_struct.pack_into', @@ -59,5 +61,4 @@ } appleveldefs = { - 'error': 'app_struct.error', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py deleted file mode 100644 --- a/pypy/module/struct/app_struct.py +++ /dev/null @@ -1,9 +0,0 @@ -# NOT_RPYTHON -""" -Application-level definitions for the struct module. 
-""" - - -class error(Exception): - """Exception raised on various occasions; argument is a string - describing what is wrong.""" diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -2,7 +2,6 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator -from rpython.tool.sourcetools import func_with_new_name from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -13,6 +12,15 @@ ) +class Cache: + def __init__(self, space): + self.error = space.new_exception_class("struct.error", space.w_Exception) + + +def get_error(space): + return space.fromcache(Cache).error + + @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) @@ -25,9 +33,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return fmtiter.totalsize @@ -43,24 +49,20 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.wrapbytes(fmtiter.result.build()) # XXX inefficient @unwrap_spec(format=str, offset=int) -def pack_into(space, format, w_buf, offset, args_w): +def pack_into(space, format, w_buffer, offset, args_w): res = pack(space, format, args_w).bytes_w(space) - buf = space.writebuf_w(w_buf) + 
buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() size = len(res) if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "pack_into requires a buffer of at least %d bytes", size) buf.setslice(offset, res) @@ -73,9 +75,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) def clearcache(space): @@ -96,9 +96,7 @@ if offset < 0: offset += buf.getlength() if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "unpack_from requires a buffer of at least %d bytes", size) buf = SubBuffer(buf, offset, size) @@ -118,21 +116,19 @@ W_Struct.__init__(self, space, format) return self - def wrap_struct_method(name): - def impl(self, space, __args__): - w_module = space.getbuiltinmodule('struct') - w_method = space.getattr(w_module, space.wrap(name)) - return space.call_obj_args( - w_method, space.wrap(self.format), __args__ - ) + def descr_pack(self, space, args_w): + return pack(space, jit.promote_string(self.format), args_w) - return func_with_new_name(impl, 'descr_' + name) + @unwrap_spec(offset=int) + def descr_pack_into(self, space, w_buffer, offset, args_w): + return pack_into(space, jit.promote_string(self.format), w_buffer, offset, args_w) - descr_pack = wrap_struct_method("pack") - descr_unpack = wrap_struct_method("unpack") - descr_pack_into = wrap_struct_method("pack_into") - descr_unpack_from = 
wrap_struct_method("unpack_from") + def descr_unpack(self, space, w_str): + return unpack(space, jit.promote_string(self.format), w_str) + @unwrap_spec(offset=int) + def descr_unpack_from(self, space, w_buffer, offset=0): + return unpack_from(space, jit.promote_string(self.format), w_buffer, offset) W_Struct.typedef = TypeDef("Struct", __new__=interp2app(W_Struct.descr__new__.im_func), diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -24,6 +24,10 @@ struct.error should be an exception class. """ assert issubclass(self.struct.error, Exception) + assert self.struct.error.__mro__ == (self.struct.error, Exception, + BaseException, object) + assert self.struct.error.__name__ == "error" + assert self.struct.error.__module__ == "struct" def test_calcsize_standard(self): """ @@ -396,6 +400,11 @@ assert type(obj2) is float assert obj2 == 42.3 + def test_struct_object(self): + s = self.struct.Struct('i') + assert s.unpack(s.pack(42)) == (42,) + assert s.unpack_from(memoryview(s.pack(42))) == (42,) + def test_trailing_counter(self): import array store = array.array('b', b' '*100) From noreply at buildbot.pypy.org Tue May 6 05:15:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 05:15:08 +0200 (CEST) Subject: [pypy-commit] pypy py3k: copy_reg is copyreg on py3k Message-ID: <20140506031508.529B51C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71313:3d70a53d701b Date: 2014-05-05 23:14 -0400 http://bitbucket.org/pypy/pypy/changeset/3d70a53d701b/ Log: copy_reg is copyreg on py3k diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -21,7 +21,7 @@ assert isinstance(ast.__version__, str) def test_flags(self): - from copy_reg import _HEAPTYPE + from copyreg import _HEAPTYPE assert 
self.ast.AST.__flags__ & _HEAPTYPE == 0 assert self.ast.Module.__flags__ & _HEAPTYPE == _HEAPTYPE From noreply at buildbot.pypy.org Tue May 6 05:18:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 05:18:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k: __builtin__ -> builtins Message-ID: <20140506031844.A04981C01DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k Changeset: r71314:920b78b33122 Date: 2014-05-05 23:18 -0400 http://bitbucket.org/pypy/pypy/changeset/920b78b33122/ Log: __builtin__ -> builtins diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -385,7 +385,7 @@ if isinstance(t, type)) for t, mod in typemods.items(): if t is _io.BlockingIOError: - assert mod == '__builtin__' + assert mod == 'builtins' elif t is _io.UnsupportedOperation: assert mod == 'io' else: From noreply at buildbot.pypy.org Tue May 6 07:42:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 07:42:36 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test Message-ID: <20140506054236.A123E1C0112@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71315:777bfc76bd25 Date: 2014-05-06 08:41 +0300 http://bitbucket.org/pypy/pypy/changeset/777bfc76bd25/ Log: fix test diff --git a/rpython/translator/goal/richards.py b/rpython/translator/goal/richards.py --- a/rpython/translator/goal/richards.py +++ b/rpython/translator/goal/richards.py @@ -102,13 +102,13 @@ self.task_waiting = False self.task_holding = False return self - + def waitingWithPacket(self): self.packet_pending = True self.task_waiting = True self.task_holding = False return self - + def isPacketPending(self): return self.packet_pending @@ -154,6 +154,7 @@ self.holdCount = 0 self.qpktCount = 0 +taskWorkArea = TaskWorkArea() class Task(TaskState): @@ -235,7 +236,7 @@ if t is None: raise Exception("Bad task id %d" % id) return t - + # DeviceTask @@ 
-309,7 +310,7 @@ else: i.control = i.control/2 ^ 0xd008 return self.release(I_DEVB) - + # WorkTask @@ -384,7 +385,7 @@ wkq = None; DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec()); DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec()); - + schedule() if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: From noreply at buildbot.pypy.org Tue May 6 10:28:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 10:28:20 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into release to profit from last-minute struct, buffer improvements Message-ID: <20140506082820.88C351C01DE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71316:3d7f7e8ac940 Date: 2014-05-06 11:08 +0300 http://bitbucket.org/pypy/pypy/changeset/3d7f7e8ac940/ Log: merge default into release to profit from last-minute struct, buffer improvements diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -390,11 +390,7 @@ self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - from pypy.interpreter.pycode import cpython_magic, default_magic - self.our_magic = default_magic - self.host_magic = cpython_magic # can be overridden to a subclass - self.initialize() def startup(self): diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -612,6 +612,15 @@ data[index] = char array._charbuf_stop() + def getslice(self, start, stop, step, size): + if step == 1: + data = self.array._charbuf_start() + try: + return rffi.charpsize2str(rffi.ptradd(data, start), size) + finally: + self.array._charbuf_stop() + return Buffer.getslice(self, start, stop, step, size) + def get_raw_address(self): return self.array._charbuf_start() diff --git a/pypy/module/mmap/interp_mmap.py 
b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -40,10 +40,10 @@ self.check_valid() return self.space.wrap(self.mmap.read(num)) - @unwrap_spec(tofind='bufferstr') - def find(self, tofind, w_start=None, w_end=None): + def find(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -54,10 +54,10 @@ end = space.getindex_w(w_end, None) return space.wrap(self.mmap.find(tofind, start, end)) - @unwrap_spec(tofind='bufferstr') - def rfind(self, tofind, w_start=None, w_end=None): + def rfind(self, w_tofind, w_start=None, w_end=None): self.check_valid() space = self.space + tofind = space.getarg_w('s#', w_tofind) if w_start is None: start = self.mmap.pos else: @@ -87,9 +87,9 @@ except OSError, e: raise mmap_error(self.space, e) - @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, w_data): self.check_valid() + data = self.space.getarg_w('s#', w_data) self.check_writeable() try: self.mmap.write(data) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -142,12 +142,13 @@ else: return space.wrap(s) - at unwrap_spec(fd=c_int, data='bufferstr') -def write(space, fd, data): + at unwrap_spec(fd=c_int) +def write(space, fd, w_data): """Write a string to a file descriptor. 
Return the number of bytes actually written, which may be smaller than len(data).""" + data = space.getarg_w('s*', w_data) try: - res = os.write(fd, data) + res = os.write(fd, data.as_str()) except OSError, e: raise wrap_oserror(space, e) else: diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -25,3 +25,38 @@ guard_true(i69, descr=...) --TICK-- """) + + def test_struct_unpack(self): + def main(n): + import struct + import array + a = array.array('c', struct.pack('i', 42)) + i = 0 + while i < n: + i += 1 + struct.unpack('i', a) # ID: unpack + return i + log = self.run(main, [1000]) + assert log.result == 1000 + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('unpack', """ + guard_not_invalidated(descr=...) + p90 = newstr(4) + call(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) + guard_no_exception(descr=...) + i91 = strgetitem(p90, 0) + i92 = strgetitem(p90, 1) + i93 = int_lshift(i92, 8) + i94 = int_or(i91, i93) + i95 = strgetitem(p90, 2) + i96 = int_lshift(i95, 16) + i97 = int_or(i94, i96) + i98 = strgetitem(p90, 3) + i99 = int_ge(i98, 128) + guard_false(i99, descr=...) + i100 = int_lshift(i98, 24) + i101 = int_or(i97, i100) + i102 = getfield_raw(\d+, descr=) + i103 = int_lt(i102, 0) + guard_false(i103, descr=...) 
+ """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -348,51 +348,6 @@ loop, = log.loops_by_id("globalread", is_entry_bridge=True) assert len(loop.ops_by_id("globalread")) == 0 - def test_struct_module(self): - def main(): - import struct - i = 1 - while i < 1000: - x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct - i += x / i - return i - - log = self.run(main) - assert log.result == main() - - loop, = log.loops_by_id("struct") - if sys.maxint == 2 ** 63 - 1: - extra = """ - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - """ - else: - extra = "" - # This could, of course stand some improvement, to remove all these - # arithmatic ops, but we've removed all the core overhead. - assert loop.match_by_id("struct", """ - guard_not_invalidated(descr=...) - # struct.pack - %(32_bit_only)s - i11 = int_and(i4, 255) - i13 = int_rshift(i4, 8) - i14 = int_and(i13, 255) - i16 = int_rshift(i13, 8) - i17 = int_and(i16, 255) - i19 = int_rshift(i16, 8) - i20 = int_and(i19, 255) - - # struct.unpack - i22 = int_lshift(i14, 8) - i23 = int_or(i11, i22) - i25 = int_lshift(i17, 16) - i26 = int_or(i23, i25) - i28 = int_ge(i20, 128) - guard_false(i28, descr=...) 
- i30 = int_lshift(i20, 24) - i31 = int_or(i26, i30) - """ % {"32_bit_only": extra}) - def test_eval(self): def main(): i = 1 diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -0,0 +1,84 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestStruct(BaseTestPyPyC): + def test_struct_function(self): + def main(n): + import struct + i = 1 + while i < n: + x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + # This could, of course stand some improvement, to remove all these + # arithmatic ops, but we've removed all the core overhead. + assert loop.match_by_id("struct", """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) + i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) + + def test_struct_object(self): + def main(n): + import struct + s = struct.Struct("i") + i = 1 + while i < n: + x = s.unpack(s.pack(i))[0] # ID: struct + i += x / i + return i + + log = self.run(main, [1000]) + assert log.result == main(1000) + + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id('struct', """ + guard_not_invalidated(descr=...) + # struct.pack + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) 
+ i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + i11 = int_and(i4, 255) + i13 = int_rshift(i4, 8) + i14 = int_and(i13, 255) + i16 = int_rshift(i13, 8) + i17 = int_and(i16, 255) + i19 = int_rshift(i16, 8) + i20 = int_and(i19, 255) + + # struct.unpack + i22 = int_lshift(i14, 8) + i23 = int_or(i11, i22) + i25 = int_lshift(i17, 16) + i26 = int_or(i23, i25) + i28 = int_ge(i20, 128) + guard_false(i28, descr=...) + i30 = int_lshift(i20, 24) + i31 = int_or(i26, i30) + """) diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -46,6 +46,8 @@ The variable struct.error is an exception raised on errors.""" interpleveldefs = { + 'error': 'interp_struct.get_error(space)', + 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'pack_into': 'interp_struct.pack_into', @@ -56,5 +58,4 @@ } appleveldefs = { - 'error': 'app_struct.error', } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py deleted file mode 100644 --- a/pypy/module/struct/app_struct.py +++ /dev/null @@ -1,9 +0,0 @@ -# NOT_RPYTHON -""" -Application-level definitions for the struct module. -""" - - -class error(Exception): - """Exception raised on various occasions; argument is a string - describing what is wrong.""" diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -8,7 +8,6 @@ class PackFormatIterator(FormatIterator): - def __init__(self, space, args_w, size): self.space = space self.args_w = args_w @@ -105,11 +104,11 @@ class UnpackFormatIterator(FormatIterator): - - def __init__(self, space, input): + def __init__(self, space, buf): self.space = space - self.input = input - self.inputpos = 0 + self.buf = buf + self.length = buf.getlength() + self.pos = 0 self.result_w = [] # list of wrapped objects # See above comment on operate. 
@@ -124,18 +123,18 @@ _operate_is_specialized_ = True def align(self, mask): - self.inputpos = (self.inputpos + mask) & ~mask + self.pos = (self.pos + mask) & ~mask def finished(self): - if self.inputpos != len(self.input): + if self.pos != self.length: raise StructError("unpack str size too long for format") def read(self, count): - end = self.inputpos + count - if end > len(self.input): + end = self.pos + count + if end > self.length: raise StructError("unpack str size too short for format") - s = self.input[self.inputpos : end] - self.inputpos = end + s = self.buf.getslice(self.pos, end, 1, count) + self.pos = end return s @specialize.argtype(1) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,7 +1,7 @@ from rpython.rlib import jit +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator -from rpython.tool.sourcetools import func_with_new_name from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -12,6 +12,15 @@ ) +class Cache: + def __init__(self, space): + self.error = space.new_exception_class("struct.error", space.w_Exception) + + +def get_error(space): + return space.fromcache(Cache).error + + @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) @@ -24,9 +33,7 @@ except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return fmtiter.totalsize @@ -42,62 +49,56 @@ except StructOverflowError, e: raise 
OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.wrap(fmtiter.result.build()) # XXX inefficient @unwrap_spec(format=str, offset=int) -def pack_into(space, format, w_buf, offset, args_w): +def pack_into(space, format, w_buffer, offset, args_w): res = pack(space, format, args_w).str_w(space) - buf = space.writebuf_w(w_buf) + buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() size = len(res) if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "pack_into requires a buffer of at least %d bytes", size) buf.setslice(offset, res) - at unwrap_spec(format=str, input='bufferstr') -def unpack(space, format, input): - fmtiter = UnpackFormatIterator(space, input) +def _unpack(space, format, buf): + fmtiter = UnpackFormatIterator(space, buf) try: fmtiter.interpret(format) except StructOverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise OperationError(w_error, space.wrap(e.msg)) + raise OperationError(get_error(space), space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) -# XXX inefficient + at unwrap_spec(format=str) +def unpack(space, format, w_str): + buf = space.getarg_w('s*', w_str) + return _unpack(space, format, buf) + + @unwrap_spec(format=str, offset=int) -def unpack_from(space, format, w_buf, offset=0): +def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) - buf = space.getarg_w('z*', w_buf) + buf = space.getarg_w('z*', w_buffer) if buf is 
None: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, "unpack_from requires a buffer argument") + raise oefmt(get_error(space), "unpack_from requires a buffer argument") if offset < 0: offset += buf.getlength() if offset < 0 or (buf.getlength() - offset) < size: - w_module = space.getbuiltinmodule('struct') - w_error = space.getattr(w_module, space.wrap('error')) - raise oefmt(w_error, + raise oefmt(get_error(space), "unpack_from requires a buffer of at least %d bytes", size) - data = buf.getslice(offset, offset + size, 1, size) - return unpack(space, format, data) + buf = SubBuffer(buf, offset, size) + return _unpack(space, format, buf) class W_Struct(W_Root): @@ -113,21 +114,19 @@ W_Struct.__init__(self, space, format) return self - def wrap_struct_method(name): - def impl(self, space, __args__): - w_module = space.getbuiltinmodule('struct') - w_method = space.getattr(w_module, space.wrap(name)) - return space.call_obj_args( - w_method, space.wrap(self.format), __args__ - ) + def descr_pack(self, space, args_w): + return pack(space, jit.promote_string(self.format), args_w) - return func_with_new_name(impl, 'descr_' + name) + @unwrap_spec(offset=int) + def descr_pack_into(self, space, w_buffer, offset, args_w): + return pack_into(space, jit.promote_string(self.format), w_buffer, offset, args_w) - descr_pack = wrap_struct_method("pack") - descr_unpack = wrap_struct_method("unpack") - descr_pack_into = wrap_struct_method("pack_into") - descr_unpack_from = wrap_struct_method("unpack_from") + def descr_unpack(self, space, w_str): + return unpack(space, jit.promote_string(self.format), w_str) + @unwrap_spec(offset=int) + def descr_unpack_from(self, space, w_buffer, offset=0): + return unpack_from(space, jit.promote_string(self.format), w_buffer, offset) W_Struct.typedef = TypeDef("Struct", __new__=interp2app(W_Struct.descr__new__.im_func), diff --git a/pypy/module/struct/test/test_struct.py 
b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -24,6 +24,10 @@ struct.error should be an exception class. """ assert issubclass(self.struct.error, Exception) + assert self.struct.error.__mro__ == (self.struct.error, Exception, + BaseException, object) + assert self.struct.error.__name__ == "error" + assert self.struct.error.__module__ == "struct" def test_calcsize_standard(self): """ @@ -403,6 +407,11 @@ assert type(obj2) is float assert obj2 == 42.3 + def test_struct_object(self): + s = self.struct.Struct('i') + assert s.unpack(s.pack(42)) == (42,) + assert s.unpack_from(memoryview(s.pack(42))) == (42,) + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -353,7 +353,7 @@ result = description.FunctionDesc(self, pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: - raise Exception, "ClassDesc for object not supported" + raise Exception("ClassDesc for object not supported") if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types result = self.getfrozen(pyobj) else: @@ -591,7 +591,7 @@ for name, value in dict.iteritems(): if value is func: return cls, name - raise Exception, "could not match bound-method to attribute name: %r" % (boundmeth,) + raise Exception("could not match bound-method to attribute name: %r" % (boundmeth,)) def ishashable(x): try: diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -65,14 +65,14 @@ s_start, s_stop = args[:2] s_step = args[2] else: - raise Exception, "range() takes 1 to 3 arguments" + raise Exception("range() takes 1 to 3 arguments") empty = False # so far if not s_step.is_constant(): step = 0 # this case signals 
a variable step else: step = s_step.const if step == 0: - raise Exception, "range() with step zero" + raise Exception("range() with step zero") if s_start.is_constant() and s_stop.is_constant(): try: if len(xrange(s_start.const, s_stop.const, step)) == 0: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -394,7 +394,7 @@ return SomePBC([subdef.classdesc for subdef in self.getallsubdefs()]) def _freeze_(self): - raise Exception, "ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly" + raise Exception("ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly") # ____________________________________________________________ diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -30,7 +30,7 @@ except (KeyboardInterrupt, SystemExit): raise except: - raise Exception, "broken specialize directive parms: %s" % directive + raise Exception("broken specialize directive parms: %s" % directive) name = name.replace(':', '__') try: specializer = getattr(pol, name) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1435,7 +1435,7 @@ elif a==2: raise X(1) elif a==3: - raise X,4 + raise X(4) else: try: l[0] @@ -3628,7 +3628,7 @@ def f(): e = OverflowError() lle = cast_instance_to_base_ptr(e) - raise Exception, lle + raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() py.test.raises(AssertionError, a.build_types, f, []) diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -7,7 +7,6 @@ import 
__builtin__ from rpython.tool.error import source_lines -from rpython.tool.stdlib_opcode import host_bytecode_spec from rpython.rlib import rstackovf from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, @@ -305,8 +304,6 @@ ] class FlowContext(object): - opcode_method_names = host_bytecode_spec.method_names - def __init__(self, graph, code): self.graph = graph func = graph.func diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -1,7 +1,7 @@ """ Buffer protocol support. """ -from rpython.rlib.objectmodel import import_from_mixin +from rpython.rlib import jit class Buffer(object): @@ -61,7 +61,7 @@ if step == 1: assert 0 <= start <= stop return self.value[start:stop] - return "".join([self.value[start + i*step] for i in xrange(size)]) + return Buffer.getslice(self, start, stop, step, size) class SubBuffer(Buffer): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -593,7 +593,7 @@ def can_enter_jit(_self, **livevars): if _self.autoreds: - raise TypeError, "Cannot call can_enter_jit on a driver with reds='auto'" + raise TypeError("Cannot call can_enter_jit on a driver with reds='auto'") # special-cased by ExtRegistryEntry if _self.check_untranslated: _self._check_arguments(livevars, False) diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -109,11 +109,11 @@ def _check_type(TYPE): if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind != 'raw': - raise TypeError, "Can only push raw values to C, not 'gc'" + raise TypeError("Can only push raw values to C, not 'gc'") # XXX probably we should recursively check for struct fields here, # lets just ignore that for now if isinstance(TYPE.TO, lltype.Array) and 'nolength' not in TYPE.TO._hints: - raise TypeError, "Can only push to C arrays without length info" + raise 
TypeError("Can only push to C arrays without length info") class ArgChain(object): @@ -136,7 +136,7 @@ elif TYPE is rffi.FLOAT: cls = SingleFloatArg else: - raise TypeError, 'Unsupported argument type: %s' % TYPE + raise TypeError('Unsupported argument type: %s' % TYPE) self._append(cls(val)) return self @@ -247,8 +247,8 @@ # assuming that argchain is completely virtual. self = jit.promote(self) if argchain.numargs != len(self.argtypes): - raise TypeError, 'Wrong number of arguments: %d expected, got %d' %\ - (len(self.argtypes), argchain.numargs) + raise TypeError('Wrong number of arguments: %d expected, got %d' % + (len(self.argtypes), argchain.numargs)) ll_args = self._prepare() i = 0 arg = argchain.first @@ -273,7 +273,7 @@ elif RESULT is lltype.Void: return self._do_call_void(self.funcsym, ll_args) else: - raise TypeError, 'Unsupported result type: %s' % RESULT + raise TypeError('Unsupported result type: %s' % RESULT) # return rffi.cast(RESULT, res) @@ -430,7 +430,7 @@ def getpointer_by_ordinal(self, name, argtypes, restype, flags=FUNCFLAG_CDECL): - return Func('by_ordinal', argtypes, restype, + return Func('by_ordinal', argtypes, restype, dlsym_byordinal(self.lib, name), flags=flags, keepalive=self) def getaddressindll(self, name): diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -122,7 +122,7 @@ """ typecheck = kwds.pop('typecheck', True) if types_ and kwds: - raise TypeError, 'Cannot mix positional arguments and keywords' + raise TypeError('Cannot mix positional arguments and keywords') if not typecheck: def decorator(f): @@ -177,7 +177,7 @@ if not s_expected.contains(s_argtype): msg = "%s argument %r must be of type %s" % ( f.func_name, srcargs[i], expected_type) - raise TypeError, msg + raise TypeError(msg) # template = """ def {name}({arglist}): @@ -576,7 +576,7 @@ # ____________________________________________________________ def hlinvoke(repr, llcallable, 
*args): - raise TypeError, "hlinvoke is meant to be rtyped and not called direclty" + raise TypeError("hlinvoke is meant to be rtyped and not called direclty") def invoke_around_extcall(before, after): """Call before() before any external function call, and after() after. diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -173,7 +173,7 @@ if type(r) is long and not is_valid_int(r): # checks only if applicable to r's type. # this happens in the garbage collector. - raise OverflowError, "signed integer expression did overflow" + raise OverflowError("signed integer expression did overflow") return r # Strange things happening for float to int on 64 bit: @@ -213,7 +213,7 @@ return other_type if self_type.SIGNED == other_type.SIGNED: return build_int(None, self_type.SIGNED, max(self_type.BITS, other_type.BITS)) - raise AssertionError, "Merging these types (%s, %s) is not supported" % (self_type, other_type) + raise AssertionError("Merging these types (%s, %s) is not supported" % (self_type, other_type)) def signedtype(t): if t in (bool, int, long): diff --git a/rpython/rlib/rsre/rpy/sre_compile.py b/rpython/rlib/rsre/rpy/sre_compile.py --- a/rpython/rlib/rsre/rpy/sre_compile.py +++ b/rpython/rlib/rsre/rpy/sre_compile.py @@ -63,7 +63,7 @@ emit(OPCODES[ANY]) elif op in REPEATING_CODES: if flags & SRE_FLAG_TEMPLATE: - raise error, "internal: unsupported template operator" + raise error("internal: unsupported template operator") emit(OPCODES[REPEAT]) skip = _len(code); emit(0) emit(av[0]) @@ -112,7 +112,7 @@ else: lo, hi = av[1].getwidth() if lo != hi: - raise error, "look-behind requires fixed-width pattern" + raise error("look-behind requires fixed-width pattern") emit(lo) # look behind _compile(code, av[1], flags) emit(OPCODES[SUCCESS]) @@ -173,7 +173,7 @@ else: code[skipyes] = _len(code) - skipyes + 1 else: - raise ValueError, ("unsupported operand type", op) + raise 
ValueError("unsupported operand type", op) def _compile_charset(charset, flags, code, fixup=None): # compile charset subprogram @@ -201,7 +201,7 @@ else: emit(CHCODES[av]) else: - raise error, "internal: unsupported set operator" + raise error("internal: unsupported set operator") emit(OPCODES[FAILURE]) def _optimize_charset(charset, fixup): diff --git a/rpython/rlib/rsre/rpy/sre_parse.py b/rpython/rlib/rsre/rpy/sre_parse.py --- a/rpython/rlib/rsre/rpy/sre_parse.py +++ b/rpython/rlib/rsre/rpy/sre_parse.py @@ -75,7 +75,7 @@ if name is not None: ogid = self.groupdict.get(name, None) if ogid is not None: - raise error, ("redefinition of group name %s as group %d; " + raise error("redefinition of group name %s as group %d; " "was group %d" % (repr(name), gid, ogid)) self.groupdict[name] = gid self.open.append(gid) @@ -188,7 +188,7 @@ try: c = self.string[self.index + 1] except IndexError: - raise error, "bogus escape (end of line)" + raise error("bogus escape (end of line)") char = char + c self.index = self.index + len(char) self.next = char @@ -238,7 +238,7 @@ escape = escape + source.get() escape = escape[2:] if len(escape) != 2: - raise error, "bogus escape: %s" % repr("\\" + escape) + raise error("bogus escape: %s" % repr("\\" + escape)) return LITERAL, int(escape, 16) & 0xff elif c in OCTDIGITS: # octal escape (up to three digits) @@ -247,12 +247,12 @@ escape = escape[1:] return LITERAL, int(escape, 8) & 0xff elif c in DIGITS: - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) def _escape(source, escape, state): # handle escape code in expression @@ -289,14 +289,14 @@ group = int(escape[1:]) if group < state.groups: if not state.checkgroup(group): - raise error, "cannot refer to open group" + raise error("cannot refer to open group") return 
GROUPREF, group raise ValueError if len(escape) == 2: return LITERAL, ord(escape[1]) except ValueError: pass - raise error, "bogus escape: %s" % repr(escape) + raise error("bogus escape: %s" % repr(escape)) def _parse_sub(source, state, nested=1): # parse an alternation: a|b|c @@ -313,7 +313,7 @@ if not source.next or sourcematch(")", 0): break else: - raise error, "pattern not properly closed" + raise error("pattern not properly closed") if len(items) == 1: return items[0] @@ -362,11 +362,11 @@ if source.match("|"): item_no = _parse(source, state) if source.match("|"): - raise error, "conditional backref with more than two branches" + raise error("conditional backref with more than two branches") else: item_no = None if source.next and not source.match(")", 0): - raise error, "pattern not properly closed" + raise error("pattern not properly closed") subpattern = SubPattern(state) subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) return subpattern @@ -431,7 +431,7 @@ elif this: code1 = LITERAL, ord(this) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") if sourcematch("-"): # potential range this = sourceget() @@ -447,14 +447,14 @@ else: code2 = LITERAL, ord(this) if code1[0] != LITERAL or code2[0] != LITERAL: - raise error, "bad character range" + raise error("bad character range") lo = code1[1] hi = code2[1] if hi < lo: - raise error, "bad character range" + raise error("bad character range") setappend((RANGE, (lo, hi))) else: - raise error, "unexpected end of regular expression" + raise error("unexpected end of regular expression") else: if code1[0] is IN: code1 = code1[1][0] @@ -507,16 +507,16 @@ if max < min: raise error("bad repeat interval") else: - raise error, "not supported" + raise error("not supported") # figure out which item to repeat if subpattern: item = subpattern[-1:] else: item = None if not item or (_len(item) == 1 and item[0][0] == AT): - raise error, "nothing to 
repeat" + raise error("nothing to repeat") if item[0][0] in REPEATCODES: - raise error, "multiple repeat" + raise error("multiple repeat") if sourcematch("?"): subpattern[-1] = (MIN_REPEAT, (min, max, item)) else: @@ -540,7 +540,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ">": break name = name + char @@ -556,7 +556,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break name = name + char @@ -567,14 +567,14 @@ "%r" % name) gid = state.groupdict.get(name) if gid is None: - raise error, "unknown group name" + raise error("unknown group name") subpatternappend((GROUPREF, gid)) continue else: char = sourceget() if char is None: - raise error, "unexpected end of pattern" - raise error, "unknown specifier: ?P%s" % char + raise error("unexpected end of pattern") + raise error("unknown specifier: ?P%s" % char) elif sourcematch(":"): # non-capturing group group = 2 @@ -585,7 +585,7 @@ break sourceget() if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") continue elif source.next in ASSERTCHARS: # lookahead assertions @@ -593,12 +593,12 @@ dir = 1 if char == "<": if source.next not in LOOKBEHINDASSERTCHARS: - raise error, "syntax error" + raise error("syntax error") dir = -1 # lookbehind char = sourceget() p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if char == "=": subpatternappend((ASSERT, (dir, p))) else: @@ -610,7 +610,7 @@ while 1: char = sourceget() if char is None: - raise error, "unterminated name" + raise error("unterminated name") if char == ")": break condname = condname + char @@ -620,16 +620,16 @@ if isname(condname): condgroup = state.groupdict.get(condname) if condgroup is None: - raise error, "unknown group name" + raise error("unknown group name") 
else: try: condgroup = int(condname) except ValueError: - raise error, "bad character in group name" + raise error("bad character in group name") else: # flags if not source.next in FLAGS: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") while source.next in FLAGS: state.flags = state.flags | FLAGS[sourceget()] if group: @@ -644,7 +644,7 @@ else: p = _parse_sub(source, state) if not sourcematch(")"): - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") if group is not None: state.closegroup(group) subpatternappend((SUBPATTERN, (group, p))) @@ -652,10 +652,10 @@ while 1: char = sourceget() if char is None: - raise error, "unexpected end of pattern" + raise error("unexpected end of pattern") if char == ")": break - raise error, "unknown extension" + raise error("unknown extension") elif this == "^": subpatternappend((AT, AT_BEGINNING)) @@ -668,7 +668,7 @@ subpatternappend(code) else: - raise error, "parser error" + raise error("parser error") return subpattern @@ -686,9 +686,9 @@ tail = source.get() if tail == ")": - raise error, "unbalanced parenthesis" + raise error("unbalanced parenthesis") elif tail: - raise error, "bogus characters at end of regular expression" + raise error("bogus characters at end of regular expression") if flags & SRE_FLAG_DEBUG: p.dump() @@ -730,23 +730,23 @@ while 1: char = sget() if char is None: - raise error, "unterminated group name" + raise error("unterminated group name") if char == ">": break name = name + char if not name: - raise error, "missing group name" + raise error("missing group name") try: index = int(name) if index < 0: - raise error, "negative group number" + raise error("negative group number") except ValueError: if not isname(name): - raise error, "bad character in group name" + raise error("bad character in group name") try: index = pattern.groupindex[name] except KeyError: - raise IndexError, "unknown group name" + raise IndexError("unknown group name") 
a((MARK, index)) elif c == "0": if s.next in OCTDIGITS: @@ -796,7 +796,7 @@ for index, group in groups: literals[index] = s = g(group) if s is None: - raise error, "unmatched group" + raise error("unmatched group") except IndexError: - raise error, "invalid group reference" + raise error("invalid group reference") return sep.join(literals) diff --git a/rpython/rlib/rstruct/formatiterator.py b/rpython/rlib/rstruct/formatiterator.py --- a/rpython/rlib/rstruct/formatiterator.py +++ b/rpython/rlib/rstruct/formatiterator.py @@ -82,6 +82,7 @@ def finished(self): pass + class CalcSizeFormatIterator(FormatIterator): totalsize = 0 diff --git a/rpython/rlib/rzipfile.py b/rpython/rlib/rzipfile.py --- a/rpython/rlib/rzipfile.py +++ b/rpython/rlib/rzipfile.py @@ -214,7 +214,7 @@ def _GetContents(self, fp): endrec = _EndRecData(fp) if not endrec: - raise BadZipfile, "File is not a zip file" + raise BadZipfile("File is not a zip file") size_cd = endrec.stuff[5] # bytes in central directory offset_cd = endrec.stuff[6] # offset of central directory self.comment = endrec.comment @@ -227,7 +227,7 @@ centdir = fp.read(46) total = total + 46 if centdir[0:4] != stringCentralDir: - raise BadZipfile, "Bad magic number for central directory" + raise BadZipfile("Bad magic number for central directory") centdir = runpack(structCentralDir, centdir) filename = fp.read(centdir[_CD_FILENAME_LENGTH]) # Create ZipInfo instance to store file information @@ -255,7 +255,7 @@ fp.seek(data.header_offset, 0) fheader = fp.read(30) if fheader[0:4] != stringFileHeader: - raise BadZipfile, "Bad magic number for file header" + raise BadZipfile("Bad magic number for file header") fheader = runpack(structFileHeader, fheader) # file_offset is computed here, since the extra field for # the central directory and for the local file header @@ -266,9 +266,8 @@ + fheader[_FH_EXTRA_FIELD_LENGTH]) fname = fp.read(fheader[_FH_FILENAME_LENGTH]) if fname != data.orig_filename: - raise BadZipfile, \ - 'File name in 
directory "%s" and header "%s" differ.' % ( - data.orig_filename, fname) + raise BadZipfile('File name in directory "%s" and ' + 'header "%s" differ.' % (data.orig_filename, fname)) fp.seek(self.start_dir, 0) def getinfo(self, filename): @@ -296,15 +295,13 @@ finally: rzlib.inflateEnd(stream) elif zinfo.compress_type == ZIP_DEFLATED: - raise BadZipfile, \ - "Cannot decompress file, zlib not installed" + raise BadZipfile("Cannot decompress file, zlib not installed") else: - raise BadZipfile, \ - "Unsupported compression method %d for file %s" % \ - (zinfo.compress_type, filename) + raise BadZipfile("Unsupported compression method %d for " + "file %s" % (zinfo.compress_type, filename)) crc = crc32(bytes) if crc != zinfo.CRC: - raise BadZipfile, "Bad CRC-32 for file %s" % filename + raise BadZipfile("Bad CRC-32 for file %s" % filename) return bytes finally: fp.close() diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -6,4 +6,5 @@ assert buf.getitem(4) == 'o' assert buf.getlength() == 11 assert buf.getslice(1, 6, 1, 5) == 'ello ' + assert buf.getslice(1, 6, 2, 3) == 'el ' assert buf.as_str() == 'hello world' diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -95,7 +95,7 @@ elif whence == 2: offset += len(self.buf) else: - raise ValueError, "whence should be 0, 1 or 2" + raise ValueError("whence should be 0, 1 or 2") if offset < 0: offset = 0 self.pos = offset diff --git a/rpython/rlib/unicodedata/unicodedb_5_2_0.py b/rpython/rlib/unicodedata/unicodedb_5_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_5_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_5_2_0.py @@ -39,7 +39,7 @@ charnode = left else: charnode = right - raise KeyError, name + raise KeyError(name) def name_of_node(charnode): res = [] @@ -112664,7 +112664,7 @@ if code == 917505: 
res = 9201 if 917536 <= code <= 917631: res = _charnames_917536[code-917536] if 917760 <= code <= 917999: res = _charnames_917760[code-917760] - if res == -1: raise KeyError, code + if res == -1: raise KeyError(code) return name_of_node(res) # the following dictionary is used by modules that take this as a base diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -58,7 +58,7 @@ try: holders = arguments.match_signature(signature, defs_h) except ArgErr, e: - raise TyperError, "signature mismatch: %s" % e.getmsg(graph.name) + raise TyperError("signature mismatch: %s" % e.getmsg(graph.name)) assert len(holders) == len(rinputs), "argument parsing mismatch" vlist = [] diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -257,7 +257,7 @@ @classmethod def _malloc(cls, n=None): if not isinstance(n, int): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") biggercls = get_ctypes_array_of_size(A, n) bigarray = allocate_ctypes(biggercls) if hasattr(bigarray, 'length'): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -191,7 +191,7 @@ _adtmeths = {} def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) def _install_extras(self, adtmeths={}, hints={}): self._adtmeths = frozendict(adtmeths) @@ -253,7 +253,7 @@ self._arrayfld = None for name, typ in fields: if name.startswith('_'): - raise NameError, ("%s: field name %r should not start with " + raise NameError("%s: field name %r should not start with " "an underscore" % (self._name, name,)) names.append(name) if name in 
flds: @@ -311,8 +311,8 @@ def _nofield(self, name): - raise AttributeError, 'struct %s has no field %r' % (self._name, - name) + raise AttributeError('struct %s has no field %r' % (self._name, + name)) def _names_without_voids(self): names_without_voids = [name for name in self._names if self._flds[name] is not Void] @@ -545,7 +545,7 @@ self.ARGS = tuple(args) assert isinstance(result, LowLevelType) if isinstance(result, ContainerType): - raise TypeError, "function result can only be primitive or pointer" + raise TypeError("function result can only be primitive or pointer") self.RESULT = result self.ABI = abi @@ -602,7 +602,7 @@ return "%s (gcopaque)" % self.tag def _inline_is_varsize(self, last): - raise TypeError, "%r cannot be inlined in structure" % self + raise TypeError("%r cannot be inlined in structure" % self) class ForwardReference(ContainerType): @@ -714,7 +714,7 @@ _cache = WeakValueDictionary() # cache the Ptrs def __new__(cls, TO, use_cache=True): if not isinstance(TO, ContainerType): - raise TypeError, ("can only point to a Container type, " + raise TypeError("can only point to a Container type, " "not to %s" % (TO,)) if not use_cache: obj = LowLevelType.__new__(cls) @@ -835,7 +835,7 @@ def cast_primitive(TGT, value): ORIG = typeOf(value) if not isinstance(TGT, Primitive) or not isinstance(ORIG, Primitive): - raise TypeError, "can only primitive to primitive" + raise TypeError("can only primitive to primitive") if ORIG == TGT: return value if ORIG == Char or ORIG == UniChar: @@ -855,7 +855,7 @@ return float(value) if ORIG == LongFloat and TGT == Float: return float(value) - raise TypeError, "unsupported cast" + raise TypeError("unsupported cast") def _cast_whatever(TGT, value): from rpython.rtyper.lltypesystem import llmemory, rffi @@ -932,13 +932,13 @@ def cast_pointer(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise 
TypeError("can only cast pointers to other pointers") return ptr._cast_to(PTRTYPE) def cast_opaque_ptr(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): - raise TypeError, "can only cast pointers to other pointers" + raise TypeError("can only cast pointers to other pointers") if CURTYPE == PTRTYPE: return ptr if CURTYPE.TO._gckind != PTRTYPE.TO._gckind: @@ -989,9 +989,9 @@ """ CURTYPE = typeOf(structptr).TO if not isinstance(CURTYPE, Struct): - raise TypeError, "direct_fieldptr: not a struct" + raise TypeError("direct_fieldptr: not a struct") if fieldname not in CURTYPE._flds: - raise TypeError, "%s has no field %r" % (CURTYPE, fieldname) + raise TypeError("%s has no field %r" % (CURTYPE, fieldname)) if not structptr: raise RuntimeError("direct_fieldptr: NULL argument") return _subarray._makeptr(structptr._obj, fieldname, structptr._solid) @@ -1004,7 +1004,7 @@ """ CURTYPE = typeOf(arrayptr).TO if not isinstance(CURTYPE, (Array, FixedSizeArray)): - raise TypeError, "direct_arrayitems: not an array" + raise TypeError("direct_arrayitems: not an array") if not arrayptr: raise RuntimeError("direct_arrayitems: NULL argument") return _subarray._makeptr(arrayptr._obj, 0, arrayptr._solid) @@ -1247,7 +1247,7 @@ from rpython.rtyper.lltypesystem import rffi if isinstance(self._T, FuncType): if len(args) != len(self._T.ARGS): - raise TypeError,"calling %r with wrong argument number: %r" % (self._T, args) + raise TypeError("calling %r with wrong argument number: %r" % (self._T, args)) for i, a, ARG in zip(range(len(self._T.ARGS)), args, self._T.ARGS): if typeOf(a) != ARG: # ARG could be Void @@ -1272,11 +1272,11 @@ pass else: args_repr = [typeOf(arg) for arg in args] - raise TypeError, ("calling %r with wrong argument " + raise TypeError("calling %r with wrong argument " "types: %r" % (self._T, args_repr)) callb = self._obj._callable if callb is None: - raise RuntimeError,"calling undefined function" + raise 
RuntimeError("calling undefined function") return callb(*args) raise TypeError("%r instance is not a function" % (self._T,)) @@ -1421,7 +1421,7 @@ self._set_offsets(_offsets) def __nonzero__(self): - raise RuntimeError, "do not test an interior pointer for nullity" + raise RuntimeError("do not test an interior pointer for nullity") def _get_obj(self): ob = self._parent @@ -1657,9 +1657,9 @@ def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None): if not is_valid_int(n): - raise TypeError, "array length must be an int" + raise TypeError("array length must be an int") if n < 0: - raise ValueError, "negative array length" + raise ValueError("negative array length") _parentable.__init__(self, TYPE) myrange = self._check_range(n) self.items = [TYPE.OF._allocate(initialization=initialization, @@ -1977,9 +1977,9 @@ assert n is None o = _opaque(T, initialization=initialization) else: - raise TypeError, "malloc: unmallocable type" + raise TypeError("malloc: unmallocable type") if flavor == 'gc' and T._gckind != 'gc' and not immortal: - raise TypeError, "gc flavor malloc of a non-GC non-immortal structure" + raise TypeError("gc flavor malloc of a non-GC non-immortal structure") if flavor == "raw" and not immortal and track_allocation: leakfinder.remember_malloc(o, framedepth=2) solid = immortal or flavor == 'raw' @@ -1987,10 +1987,10 @@ def free(p, flavor, track_allocation=True): if flavor.startswith('gc'): - raise TypeError, "gc flavor free" + raise TypeError("gc flavor free") T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + raise TypeError("free(): only for pointers to non-gc containers") if track_allocation: leakfinder.remember_free(p._obj0) p._obj0._free() @@ -1998,7 +1998,7 @@ def render_immortal(p, track_allocation=True): T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': - raise TypeError, "free(): only for pointers to non-gc containers" + 
raise TypeError("free(): only for pointers to non-gc containers") if track_allocation: leakfinder.remember_free(p._obj0) @@ -2033,7 +2033,7 @@ def functionptr(TYPE, name, **attrs): if not isinstance(TYPE, FuncType): - raise TypeError, "functionptr() for FuncTypes only" + raise TypeError("functionptr() for FuncTypes only") try: hash(tuple(attrs.items())) except TypeError: @@ -2046,7 +2046,7 @@ def opaqueptr(TYPE, name, **attrs): if not isinstance(TYPE, OpaqueType): - raise TypeError, "opaqueptr() for OpaqueTypes only" + raise TypeError("opaqueptr() for OpaqueTypes only") o = _opaque(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o, solid=True) @@ -2064,23 +2064,23 @@ def attachRuntimeTypeInfo(GCSTRUCT, funcptr=None, destrptr=None, customtraceptr=None): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) GCSTRUCT._attach_runtime_type_info_funcptr(funcptr, destrptr, customtraceptr) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def getRuntimeTypeInfo(GCSTRUCT): if not isinstance(GCSTRUCT, RttiStruct): - raise TypeError, "expected a RttiStruct: %s" % GCSTRUCT + raise TypeError("expected a RttiStruct: %s" % GCSTRUCT) if GCSTRUCT._runtime_type_info is None: - raise ValueError, ("no attached runtime type info for GcStruct %s" % + raise ValueError("no attached runtime type info for GcStruct %s" % GCSTRUCT._name) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) def runtime_type_info(p): T = typeOf(p) if not isinstance(T, Ptr) or not isinstance(T.TO, RttiStruct): - raise TypeError, "runtime_type_info on non-RttiStruct pointer: %s" % p + raise TypeError("runtime_type_info on non-RttiStruct pointer: %s" % p) struct = p._obj top_parent = top_container(struct) result = getRuntimeTypeInfo(top_parent._TYPE) @@ -2090,7 +2090,7 @@ T = typeOf(query_funcptr).TO.ARGS[0] result2 = query_funcptr(cast_pointer(T, p)) if result != result2: - raise 
RuntimeError, ("runtime type-info function for %s:\n" + raise RuntimeError("runtime type-info function for %s:\n" " returned: %s,\n" "should have been: %s" % (p, result2, result)) return result diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -379,7 +379,7 @@ def rtype_method_replace(self, hop): rstr = hop.args_r[0].repr if not (hop.args_r[1] == rstr.char_repr and hop.args_r[2] == rstr.char_repr): - raise TyperError, 'replace only works for char args' + raise TyperError('replace only works for char args') v_str, v_c1, v_c2 = hop.inputargs(rstr.repr, rstr.char_repr, rstr.char_repr) hop.exception_cannot_occur() return hop.gendirectcall(self.ll.ll_replace_chr_chr, v_str, v_c1, v_c2) diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -22,12 +22,12 @@ try: mod = __import__(module, {}, {}, ['__doc__']) except ImportError, e: - raise Exception, "Import error loading %s: %s" % (dottedname, e) + raise Exception("Import error loading %s: %s" % (dottedname, e)) try: func = getattr(mod, name) except AttributeError: - raise Exception, "Function %s not found in module" % dottedname + raise Exception("Function %s not found in module" % dottedname) return func diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -178,7 +178,7 @@ else: return self.db.get(value) else: - raise TypeError, "expr(%r)" % (v,) + raise TypeError("expr(%r)" % (v,)) # ____________________________________________________________ diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -934,7 +934,7 @@ elif hasattr(fnobj._callable, "c_name"): return [] else: - raise ValueError, "don't know how to generate code for %r" % 
(fnobj,) + raise ValueError("don't know how to generate code for %r" % (fnobj,)) class ExtType_OpaqueNode(ContainerNode): nodekind = 'rpyopaque' diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -35,9 +36,6 @@ pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif pypy_asm_stack_bottom(); -#ifdef PYPY_X86_CHECK_SSE2_DEFINED - pypy_x86_check_sse2(); -#endif instrument_setup(); #ifndef MS_WINDOWS @@ -83,6 +81,9 @@ int PYPY_MAIN_FUNCTION(int argc, char *argv[]) { +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif return pypy_main_function(argc, argv); } diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -627,7 +627,7 @@ elif output.startswith('T'): return output[1:] else: - raise ValueError, 'probing for env var returned %r' % (output,) + raise ValueError('probing for env var returned %r' % (output,)) def test_dictlike_environ_getitem(): def fn(s): diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -234,9 +234,9 @@ if os.WIFEXITED(status): status = os.WEXITSTATUS(status) if status != 0: - raise Exception, "instrumentation child failed: %d" % status + raise Exception("instrumentation child failed: %d" % status) else: - raise Exception, "instrumentation child aborted" + raise Exception("instrumentation child aborted") import array, struct n = datafile.size()//struct.calcsize('L') datafile = datafile.open('rb') diff --git a/rpython/translator/gensupp.py b/rpython/translator/gensupp.py --- a/rpython/translator/gensupp.py +++ b/rpython/translator/gensupp.py @@ -39,7 +39,7 @@ before 
generating any new names.""" for name in txt.split(): if name in self.seennames: - raise NameError, "%s has already been seen!" + raise NameError("%s has already been seen!") self.seennames[name] = 1 def _ensure_unique(self, basename): diff --git a/rpython/translator/goal/bpnn.py b/rpython/translator/goal/bpnn.py --- a/rpython/translator/goal/bpnn.py +++ b/rpython/translator/goal/bpnn.py @@ -74,7 +74,7 @@ def update(self, inputs): if len(inputs) != self.ni-1: - raise ValueError, 'wrong number of inputs' + raise ValueError('wrong number of inputs') # input activations for i in range(self.ni-1): @@ -100,7 +100,7 @@ def backPropagate(self, targets, N, M): if len(targets) != self.no: - raise ValueError, 'wrong number of target values' + raise ValueError('wrong number of target values') # calculate error terms for output output_deltas = [0.0] * self.no diff --git a/rpython/translator/goal/richards.py b/rpython/translator/goal/richards.py --- a/rpython/translator/goal/richards.py +++ b/rpython/translator/goal/richards.py @@ -102,13 +102,13 @@ self.task_waiting = False self.task_holding = False return self - + def waitingWithPacket(self): self.packet_pending = True self.task_waiting = True self.task_holding = False return self - + def isPacketPending(self): return self.packet_pending @@ -144,6 +144,9 @@ class TaskWorkArea(object): def __init__(self): + self.reset() + + def reset(self): self.taskTab = [None] * TASKTABSIZE self.taskList = None @@ -233,7 +236,7 @@ if t is None: raise Exception("Bad task id %d" % id) return t - + # DeviceTask @@ -307,7 +310,7 @@ else: i.control = i.control/2 ^ 0xd008 return self.release(I_DEVB) - + # WorkTask @@ -361,8 +364,7 @@ def run(self, iterations): for i in xrange(iterations): - taskWorkArea.holdCount = 0 - taskWorkArea.qpktCount = 0 + taskWorkArea.reset() IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec()) @@ -383,7 +385,7 @@ wkq = None; DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec()); 
DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec()); - + schedule() if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -267,7 +267,7 @@ # Only required on armhf and mips{,el}, not armel. But there's no way to # detect armhf without shelling out if (platform.architecture()[0] == '64bit' - or platform.machine().startswith(('arm', 'mips'))): + or platform.machine().startswith(('arm', 'mips', 'ppc'))): host_factory = LinuxPIC else: host_factory = Linux diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -20,8 +20,8 @@ try: subprocess.check_output([cc, '--version']) except: - raise ValueError,"Could not find compiler specified by cc option" + \ - " '%s', it must be a valid exe file on your path"%cc + raise ValueError("Could not find compiler specified by cc option '%s'," + " it must be a valid exe file on your path" % cc) return MingwPlatform(cc) def Windows(cc=None): @@ -31,7 +31,7 @@ raise Exception("Win64 is not supported. 
You must either build for Win32" " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) - + def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -94,7 +94,7 @@ name = "msvc" so_ext = 'dll' exe_ext = 'exe' - + relevant_environ = ('PATH', 'INCLUDE', 'LIB') cc = 'cl.exe' @@ -105,7 +105,7 @@ standalone_only = () shared_only = () environ = None - + def __init__(self, cc=None, x64=False): self.x64 = x64 msvc_compiler_environ = find_msvc_env(x64) @@ -134,7 +134,7 @@ else: masm32 = 'ml.exe' masm64 = 'ml64.exe' - + if x64: self.masm = masm64 else: @@ -338,10 +338,10 @@ definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) - rules.append(('stdafx.pch', 'stdafx.h', + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '$(CREATE_PCH) $(INCLUDEDIRS)')) - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files @@ -361,7 +361,7 @@ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) @@ -371,7 +371,7 @@ for rule in rules: m.rule(*rule) - + if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -116,7 +116,7 @@ print >>f, " ",op print >>f, '--end--' return - raise TypeError, "don't know about %r" % x + raise TypeError("don't know about %r" % x) def view(self): From noreply at buildbot.pypy.org Tue May 6 10:34:50 2014 From: noreply at buildbot.pypy.org (wlav) 
Date: Tue, 6 May 2014 10:34:50 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: consolidate all data types test cases across PyPy/cppyy and CPython/cppyy and add missing features (long double, enums, etc.) Message-ID: <20140506083450.1E0D11C01DE@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71317:32c47c1dcb90 Date: 2014-05-05 16:03 -0700 http://bitbucket.org/pypy/pypy/changeset/32c47c1dcb90/ Log: consolidate all data types test cases across PyPy/cppyy and CPython/cppyy and add missing features (long double, enums, etc.) diff too long, truncating to 2000 out of 2763 lines diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -15,6 +15,7 @@ '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_is_enum' : 'interp_cppyy.is_enum', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -154,6 +154,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -100,18 +100,19 @@ 
c_method = c_opaque_ptr c_index = nt.new_primitive_type(space, 'long') - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = nt.new_void_type(space) + c_char = nt.new_primitive_type(space, 'char') + c_uchar = nt.new_primitive_type(space, 'unsigned char') + c_short = nt.new_primitive_type(space, 'short') + c_int = nt.new_primitive_type(space, 'int') + c_long = nt.new_primitive_type(space, 'long') + c_llong = nt.new_primitive_type(space, 'long long') + c_ullong = nt.new_primitive_type(space, 'unsigned long long') + c_float = nt.new_primitive_type(space, 'float') + c_double = nt.new_primitive_type(space, 'double') + c_ldouble = nt.new_primitive_type(space, 'long double') - c_ccharp = nt.new_pointer_type(space, c_char) + c_ccharp = nt.new_pointer_type(space, c_char) c_index_array = nt.new_pointer_type(space, c_void) c_voidp = nt.new_pointer_type(space, c_void) @@ -145,6 +146,7 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), @@ -330,6 +332,9 @@ def c_call_d(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, 
nargs, cargs): + args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -100,7 +100,8 @@ return fieldptr def _is_abstract(self, space): - raise OperationError(space.w_TypeError, space.wrap("no converter available for '%s'" % self.name)) + raise OperationError(space.w_TypeError, + space.wrap("no converter available for '%s'" % self.name)) def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -241,9 +242,8 @@ def convert_argument_libffi(self, space, w_obj, address, call_local): assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP) # see interp_cppyy.py - obj = self._unwrap_object(space, w_obj) typed_buf = rffi.cast(self.c_ptrtype, call_local) - typed_buf[0] = obj + typed_buf[0] = self._unwrap_object(space, w_obj) x = rffi.cast(rffi.VOIDPP, address) x[0] = call_local @@ -299,22 +299,61 @@ else: address[0] = '\x00' -class CharConverter(ffitypes.typeid(rffi.CHAR), TypeConverter): + +class CharTypeConverterMixin(NumericTypeConverterMixin): + _mixin_ = True + + def __init__(self, space, default): + if default: + cval = default[0] + else: + cval = '\x00' + self.default = cval + +class CharConverter(ffitypes.typeid(rffi.CHAR), CharTypeConverterMixin, TypeConverter): + _immutable_ = True + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.CCHARP, address) x[0] = self._unwrap_object(space, w_obj) - def convert_argument_libffi(self, space, w_obj, address, call_local): - x = rffi.cast(self.c_ptrtype, address) + +class ConstRefCharTypeConverterMixin(ConstRefNumericTypeConverterMixin): + _mixin_ = True + + def 
__init__(self, space, default): + if default: + cval = default[0] + else: + cval = '\x00' + self.default = cval + +class ConstRefCharConverter(ffitypes.typeid(rffi.CHAR), + ConstRefCharTypeConverterMixin, TypeConverter): + _immuteable_ = True + uses_local = True + libffitype = jit_libffi.types.pointer + + def convert_argument(self, space, w_obj, address, call_local): + x = rffi.cast(rffi.CCHARP, address) x[0] = self._unwrap_object(space, w_obj) - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - return space.wrap(address[0]) +class SCharConverter(ffitypes.typeid(rffi.SIGNEDCHAR), CharTypeConverterMixin, TypeConverter): + _immutable_ = True - def to_memory(self, space, w_obj, w_value, offset): - address = rffi.cast(rffi.CCHARP, self._get_raw_address(space, w_obj, offset)) - address[0] = self._unwrap_object(space, w_value) + def convert_argument(self, space, w_obj, address, call_local): + x = rffi.cast(rffi.CCHARP, address) + x[0] = self._unwrap_object(space, w_obj) + +class ConstRefSCharConverter(ffitypes.typeid(rffi.SIGNEDCHAR), + ConstRefCharTypeConverterMixin, TypeConverter): + _immuteable_ = True + uses_local = True + libffitype = jit_libffi.types.pointer + + def convert_argument(self, space, w_obj, address, call_local): + x = rffi.cast(rffi.CCHARP, address) + x[0] = self._unwrap_object(space, w_obj) class FloatConverter(ffitypes.typeid(rffi.FLOAT), FloatTypeConverterMixin, TypeConverter): _immutable_fields_ = ['default'] @@ -357,6 +396,36 @@ typecode = 'D' +class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default'] + typecode = '?' + + def __init__(self, space, default): + if default: + self.default = rffi.cast(self.c_type, rfloat.rstring_to_float(default)) + else: + self.default = rffi.cast(self.c_type, 0.) 
+ + def default_argument_libffi(self, space, address): + # suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = self._get_raw_address(space, w_obj, offset) + rffiptr = rffi.cast(self.c_ptrtype, address) + # TODO: this looses precision + return space.wrap(float(rffiptr[0])) + +class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['libffitype', 'typecode'] + libffitype = jit_libffi.types.pointer + + def default_argument_libffi(self, space, address): + # suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) @@ -677,7 +746,10 @@ elif compound == "": return InstanceConverter(space, cppclass) elif capi.c_is_enum(space, clean_name): - return _converters['unsigned'](space, default) + # TODO: this is missing several cases + if compound == "&": + return _converters['const unsigned int&'](space, default) + return _converters['unsigned int'+compound](space, default) # 5) void converter, which fails on use # @@ -688,10 +760,15 @@ _converters["bool"] = BoolConverter _converters["char"] = CharConverter +_converters["const char&"] = ConstRefCharConverter +_converters["signed char"] = SCharConverter +_converters["const signed char&"] = ConstRefSCharConverter _converters["float"] = FloatConverter _converters["const float&"] = ConstFloatRefConverter _converters["double"] = DoubleConverter _converters["const double&"] = ConstDoubleRefConverter +_converters["long double"] = LongDoubleConverter +_converters["const long double&"] = ConstLongDoubleRefConverter _converters["const 
char*"] = CStringConverter _converters["void*"] = VoidPtrConverter _converters["void**"] = VoidPtrPtrConverter @@ -807,8 +884,12 @@ "NOT_RPYTHON" aliases = ( ("char", "unsigned char"), + ("const char&", "const unsigned char&"), ("const char*", "char*"), + ("long long", "Long64_t"), + ("unsigned long long", "ULong64_t"), + ("std::basic_string", "string"), ("const std::basic_string&", "const string&"), ("std::basic_string&", "string&"), diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -3,7 +3,7 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import jit_libffi +from rpython.rlib import jit, jit_libffi from pypy.module._rawffi.interp_rawffi import unpack_simple_shape from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -122,6 +122,36 @@ rffi.cast(self.c_ptrtype, rffi.cast(rffi.VOIDPP, result)[0])) +class LongDoubleExecutor(ffitypes.typeid(rffi.LONGDOUBLE), NumericExecutorMixin, FunctionExecutor): + # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + _immutable_ = True + c_stubcall = staticmethod(capi.c_call_ld) + + @jit.dont_look_inside + def execute(self, space, cppmethod, cppthis, num_args, args): + result = self.c_stubcall(space, cppmethod, cppthis, num_args, args) + return self._wrap_object(space, rffi.cast(self.c_type, result)) + + def execute_libffi(self, space, cif_descr, funcaddr, buffer): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + +class LongDoubleRefExecutor(ffitypes.typeid(rffi.LONGDOUBLE), + NumericRefExecutorMixin, FunctionExecutor): + # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + _immutable_fields_ = ['libffitype'] + libffitype = jit_libffi.types.pointer + + @jit.dont_look_inside + def execute(self, space, cppmethod, cppthis, 
num_args, args): + result = capi.c_call_r(space, cppmethod, cppthis, num_args, args) + return self._wrap_reference(space, rffi.cast(self.c_ptrtype, result)) + + def execute_libffi(self, space, cif_descr, funcaddr, buffer): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + + class CStringExecutor(FunctionExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): @@ -313,6 +343,7 @@ type_info = ( (bool, capi.c_call_b, ("bool",)), (rffi.CHAR, capi.c_call_c, ("char", "unsigned char")), + (rffi.SIGNEDCHAR, capi.c_call_c, ("signed char",)), (rffi.SHORT, capi.c_call_h, ("short", "short int", "unsigned short", "unsigned short int")), (rffi.INT, capi.c_call_i, ("int",)), (rffi.UINT, capi.c_call_l, ("unsigned", "unsigned int")), @@ -335,6 +366,10 @@ _executors[name] = BasicExecutor _executors[name+'&'] = BasicRefExecutor _executors['const '+name+'&'] = BasicRefExecutor # no copy needed for builtins + # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + _executors["long double"] = LongDoubleExecutor + _executors["long double&"] = LongDoubleRefExecutor + _executors["const long double&"] = LongDoubleRefExecutor _build_basic_executors() # create the pointer executors; all real work is in the PtrTypeExecutor, since diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rarithmetic import r_singlefloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib import jit_libffi, rfloat # Mixins to share between converter and executor classes (in converter.py and @@ -32,7 +32,7 @@ _mixin_ = True _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] - libffitype = jit_libffi.types.schar + libffitype = jit_libffi.types.uchar c_type = 
rffi.CHAR c_ptrtype = rffi.CCHARP # there's no such thing as rffi.CHARP @@ -44,14 +44,45 @@ raise OperationError(space.w_ValueError, space.wrap("char arg not in range(256)")) - value = rffi.cast(rffi.CHAR, space.c_int_w(w_value)) - else: - value = space.str_w(w_value) + return rffi.cast(self.c_type, space.c_int_w(w_value)) + + # else string type + value = space.str_w(w_value) if len(value) != 1: - raise OperationError(space.w_ValueError, + raise OperationError(space.w_TypeError, space.wrap("char expected, got string of size %d" % len(value))) - return value[0] # turn it into a "char" to the annotator + return rffi.cast(self.c_type, value[0]) + +UCharTypeMixin = CharTypeMixin + +class SCharTypeMixin(object): + _mixin_ = True + _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype'] + + # TODO: signed char is treated as an int type, figure out what's best + libffitype = jit_libffi.types.schar + c_type = rffi.CHAR + c_ptrtype = rffi.CCHARP + + def _unwrap_object(self, space, w_value): + # allow int to pass to char and make sure that str is of length 1 + if space.isinstance_w(w_value, space.w_int): + ival = space.c_int_w(w_value) + if ival < -128 or 128 <= ival: + raise OperationError(space.w_ValueError, + space.wrap("signed char arg not in range(-128, 128)")) + + return rffi.cast(self.c_type, space.c_int_w(w_value)) + + # else string type + value = space.str_w(w_value) + + if len(value) != 1: + raise OperationError(space.w_TypeError, + space.wrap("signef char expected, got string of size %d" % len(value))) + return rffi.cast(self.c_type, value[0]) + class ShortTypeMixin(object): _mixin_ = True @@ -168,11 +199,29 @@ def _unwrap_object(self, space, w_obj): return space.float_w(w_obj) +class LongDoubleTypeMixin(object): + _mixin_ = True + _immutable_fields_ = ['libffitype', 'c_type', 'c_ptrtype', 'typecode'] + + libffitype = jit_libffi.types.longdouble + c_type = rffi.LONGDOUBLE + c_ptrtype = rffi.LONGDOUBLEP + + def _unwrap_object(self, space, w_obj): + # TODO: this 
looses precision + return r_longfloat(space.float_w(w_obj)) + + def _wrap_object(self, space, obj): + # TODO: this looses precision + return space.wrap(float(obj)) + def typeid(c_type): "NOT_RPYTHON" if c_type == bool: return BoolTypeMixin if c_type == rffi.CHAR: return CharTypeMixin + if c_type == rffi.UCHAR: return UCharTypeMixin + if c_type == rffi.SIGNEDCHAR: return SCharTypeMixin if c_type == rffi.SHORT: return ShortTypeMixin if c_type == rffi.USHORT: return UShortTypeMixin if c_type == rffi.INT: return IntTypeMixin @@ -183,6 +232,7 @@ if c_type == rffi.ULONGLONG: return ULongLongTypeMixin if c_type == rffi.FLOAT: return FloatTypeMixin if c_type == rffi.DOUBLE: return DoubleTypeMixin + if c_type == rffi.LONGDOUBLE: return LongDoubleTypeMixin # should never get here raise TypeError("unknown rffi type: %s" % c_type) diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h --- a/pypy/module/cppyy/include/capi.h +++ b/pypy/module/cppyy/include/capi.h @@ -38,6 +38,7 @@ long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); double cppyy_call_d(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); + long double cppyy_call_ld(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args); diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -700,6 +700,10 @@ except Exception: return space.w_False + at unwrap_spec(name=str) +def is_enum(space, name): + return space.wrap(capi.c_is_enum(space, name)) + class W_CPPScope(W_Root): _attrs_ = ['space', 'name', 'handle', 'methods', 'datamembers'] _immutable_fields_ = ['kind', 
'name'] diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -231,6 +231,11 @@ else: pycppitem = make_pycppclass(scope, true_name, name, cppitem) + # enums (special case) + if not cppitem: + if cppyy._is_enum(full_name): + return getattr(cppyy.gbl, "unsigned int") + # templates if not cppitem: cppitem = cppyy._template_byname(true_name) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -12,6 +12,8 @@ #include #include +#include + #pragma GCC diagnostic ignored "-Winvalid-offsetof" // add example01.cxx code @@ -78,50 +80,56 @@ #define PUBLIC_CPPYY_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ - offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname), false)); \ argtypes.clear(); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname, argtypes, #dmtype)); \ - s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + s_methods["CppyyTestData::get_"#dmname] = s_method_id++; \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_cr", argtypes, "const "#dmtype"&")); \ + s_methods["CppyyTestData::get_"#dmname"_cr"] = s_method_id++; \ argtypes.push_back(#dmtype); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "set_"#dmname, argtypes, "void")); \ - s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + s_methods["CppyyTestData::set_"#dmname] = s_method_id++; \ argtypes.clear(); \ argtypes.push_back("const "#dmtype"&"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ - "set_"#dmname"_c", argtypes, "void")); \ - s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + "set_"#dmname"_cr", argtypes, "void")); \ + s_methods["CppyyTestData::set_"#dmname"_cr"] = s_method_id++ #define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ 
PUBLIC_CPPYY_DATA(dmname, dmtype); \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ - offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname##_array), false)); \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ - offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname##_array2), false)); \ argtypes.clear(); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname"_array", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + s_methods["CppyyTestData::get_"#dmname"_array"] = s_method_id++; \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname"_array2", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + s_methods["CppyyTestData::get_"#dmname"_array2"] = s_method_id++ #define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ PUBLIC_CPPYY_DATA2(dmname, dmtype); \ argtypes.push_back(#dmtype"*"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "pass_array", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + s_methods["CppyyTestData::pass_array_"#dmname] = s_method_id++; \ argtypes.clear(); argtypes.push_back("void*"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "pass_void_array_"#key, argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + s_methods["CppyyTestData::pass_void_array_"#key] = s_method_id++ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::CppyyTestData::s_##dmname, true)) +static unsigned int g_kNothing = 6; +static unsigned int g_kSomething = 111; +static unsigned int g_kLots = 42; struct Cppyy_InitPseudoReflectionInfo { Cppyy_InitPseudoReflectionInfo() { @@ -284,22 +292,23 @@ 
//==================================================================== - { // class cppyy_test_data -- - s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + { // class CppyyTestData -- + s_handles["CppyyTestData"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // cppyy_test_data() + // CppyyTestData() std::vector argtypes; - methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); - s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("CppyyTestData", argtypes, "constructor", kConstructor)); + s_methods["CppyyTestData::CppyyTestData"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); - s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + s_methods["CppyyTestData::destroy_arrays"] = s_method_id++; std::vector data; PUBLIC_CPPYY_DATA2(bool, bool); PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (schar, signed char); PUBLIC_CPPYY_DATA (uchar, unsigned char); PUBLIC_CPPYY_DATA3(short, short, h); PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); @@ -311,10 +320,13 @@ PUBLIC_CPPYY_DATA (ullong, unsigned long long); PUBLIC_CPPYY_DATA3(float, float, f); PUBLIC_CPPYY_DATA3(double, double, d); - PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (ldouble, long double); + PUBLIC_CPPYY_DATA (enum, CppyyTestData::EWhat); PUBLIC_CPPYY_DATA (voidp, void*); + PUBLIC_CPPYY_STATIC_DATA(bool, bool); PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(schar, signed char); PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); PUBLIC_CPPYY_STATIC_DATA(short, short); PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); @@ -326,12 +338,20 @@ PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); PUBLIC_CPPYY_STATIC_DATA(float, float); PUBLIC_CPPYY_STATIC_DATA(double, double); - PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(ldouble, long double); + 
PUBLIC_CPPYY_STATIC_DATA(enum, CppyyTestData::EWhat); PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + data.push_back(Cppyy_PseudoDatambrInfo( + "kNothing", "CppyyTestData::EWhat", (ptrdiff_t)&g_kNothing, true)); + data.push_back(Cppyy_PseudoDatambrInfo( + "kSomething", "CppyyTestData::EWhat", (ptrdiff_t)&g_kSomething, true)); + data.push_back(Cppyy_PseudoDatambrInfo( + "kLots", "CppyyTestData::EWhat", (ptrdiff_t)&g_kLots, true)); + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); s_scopes[(cppyy_scope_t)s_scope_id] = info; - } // -- class cppyy_test_data + } // -- class CppyyTestData } } _init; @@ -385,79 +405,109 @@ } else if (idx == s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + } else if (idx == s_methods["CppyyTestData::destroy_arrays"]) { assert(self && nargs == 0); - ((dummy::cppyy_test_data*)self)->destroy_arrays(); - } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + ((dummy::CppyyTestData*)self)->destroy_arrays(); + } else if (idx == s_methods["CppyyTestData::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); - } else if (idx == s_methods["cppyy_test_data::set_char"]) { + ((dummy::CppyyTestData*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["CppyyTestData::set_char"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); - } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + ((dummy::CppyyTestData*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["CppyyTestData::set_char_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); - } else if (idx == 
s_methods["cppyy_test_data::set_short"]) { + ((dummy::CppyyTestData*)self)->set_char_cr(*(char*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_schar"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); - } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + ((dummy::CppyyTestData*)self)->set_schar((signed char)((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["CppyyTestData::set_schar_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + ((dummy::CppyyTestData*)self)->set_schar_cr(*(signed char*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_uchar"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); - } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + ((dummy::CppyyTestData*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["CppyyTestData::set_uchar_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned short*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_int"]) { + ((dummy::CppyyTestData*)self)->set_uchar_cr(*(unsigned char*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_short"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); - } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + ((dummy::CppyyTestData*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["CppyyTestData::set_short_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_uint"]) { + 
((dummy::CppyyTestData*)self)->set_short_cr(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_ushort"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); - } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + ((dummy::CppyyTestData*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["CppyyTestData::set_ushort_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_long"]) { + ((dummy::CppyyTestData*)self)->set_ushort_cr(*(unsigned short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_int"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); - } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + ((dummy::CppyyTestData*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["CppyyTestData::set_int_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + ((dummy::CppyyTestData*)self)->set_int_cr(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_uint"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); - } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + ((dummy::CppyyTestData*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["CppyyTestData::set_uint_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + ((dummy::CppyyTestData*)self)->set_uint_cr(*(unsigned 
int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_long"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); - } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + ((dummy::CppyyTestData*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["CppyyTestData::set_long_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + ((dummy::CppyyTestData*)self)->set_long_cr(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_ulong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); - } else if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + ((dummy::CppyyTestData*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["CppyyTestData::set_ulong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_float"]) { + ((dummy::CppyyTestData*)self)->set_ulong_cr(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_llong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); - } else if (idx == s_methods["cppyy_test_data::set_float_c"]) { + ((dummy::CppyyTestData*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["CppyyTestData::set_llong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_double"]) { + ((dummy::CppyyTestData*)self)->set_llong_cr(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == 
s_methods["CppyyTestData::set_ullong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); - } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + ((dummy::CppyyTestData*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else if (idx == s_methods["CppyyTestData::set_ullong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + ((dummy::CppyyTestData*)self)->set_ullong_cr(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_float"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == s_methods["CppyyTestData::set_float_cr"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_float_cr(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_double"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["CppyyTestData::set_double_cr"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_double_cr(*(double*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_ldouble"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_ldouble(((CPPYY_G__value*)args)[0].obj.ld); + } else if (idx == s_methods["CppyyTestData::set_ldouble_cr"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_ldouble_cr(*(long double*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_enum"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_enum( + (dummy::CppyyTestData::EWhat)((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["CppyyTestData::set_enum_cr"]) { + assert(self && nargs == 1); + ((dummy::CppyyTestData*)self)->set_enum_cr( + 
*(dummy::CppyyTestData::EWhat*)&((CPPYY_G__value*)args)[0]); } else { + for (std::map::iterator it = s_methods.begin(); + it != s_methods.end(); ++it) { + if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + } assert(!"method unknown in cppyy_call_v"); } } @@ -465,9 +515,9 @@ unsigned char cppyy_call_b(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { unsigned char result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_bool"]) { + if (idx == s_methods["CppyyTestData::get_bool"]) { assert(self && nargs == 0); - result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + result = (unsigned char)((dummy::CppyyTestData*)self)->get_bool(); } else { assert(!"method unknown in cppyy_call_b"); } @@ -477,12 +527,15 @@ char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_char"]) { + if (idx == s_methods["CppyyTestData::get_char"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_char(); - } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + result = ((dummy::CppyyTestData*)self)->get_char(); + } else if (idx == s_methods["CppyyTestData::get_schar"]) { assert(self && nargs == 0); - result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + result = (char)((dummy::CppyyTestData*)self)->get_schar(); + } else if (idx == s_methods["CppyyTestData::get_uchar"]) { + assert(self && nargs == 0); + result = (char)((dummy::CppyyTestData*)self)->get_uchar(); } else { assert(!"method unknown in cppyy_call_c"); } @@ -492,12 +545,12 @@ short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { short result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_short"]) { + if (idx == s_methods["CppyyTestData::get_short"]) { assert(self && nargs == 0); - result = 
((dummy::cppyy_test_data*)self)->get_short(); - } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + result = ((dummy::CppyyTestData*)self)->get_short(); + } else if (idx == s_methods["CppyyTestData::get_ushort"]) { assert(self && nargs == 0); - result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + result = (short)((dummy::CppyyTestData*)self)->get_ushort(); } else { assert(!"method unknown in cppyy_call_h"); } @@ -527,9 +580,9 @@ assert(self && nargs == 1); result = ((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::get_int"]) { + } else if (idx == s_methods["CppyyTestData::get_int"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_int(); + result = ((dummy::CppyyTestData*)self)->get_int(); } else { assert(!"method unknown in cppyy_call_i"); } @@ -556,123 +609,130 @@ assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::get_uint"]) { + } else if (idx == s_methods["CppyyTestData::get_uint"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_uint(); - } else if (idx == s_methods["cppyy_test_data::get_long"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint(); + } else if (idx == s_methods["CppyyTestData::get_long"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_long(); - } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + result = ((dummy::CppyyTestData*)self)->get_long(); + } else if (idx == s_methods["CppyyTestData::get_ulong"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); - } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ulong(); + } else if (idx == s_methods["CppyyTestData::get_enum"]) { 
assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); - } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_enum(); + } else if (idx == s_methods["CppyyTestData::get_bool_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); - } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_bool_array(); + } else if (idx == s_methods["CppyyTestData::get_bool_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_short_array(); - } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_bool_array2(); + } else if (idx == s_methods["CppyyTestData::get_short_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); - } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_short_array(); + } else if (idx == s_methods["CppyyTestData::get_short_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); - } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_short_array2(); + } else if (idx == s_methods["CppyyTestData::get_ushort_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); - } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ushort_array(); + } else if (idx == s_methods["CppyyTestData::get_ushort_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); - } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + result = 
(long)((dummy::CppyyTestData*)self)->get_ushort_array2(); + } else if (idx == s_methods["CppyyTestData::get_int_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); - } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_int_array(); + } else if (idx == s_methods["CppyyTestData::get_int_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_uint_array(); - } else if (idx == s_methods["cppyy_test_data::get_uint_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_int_array2(); + } else if (idx == s_methods["CppyyTestData::get_uint_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); - } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint_array(); + } else if (idx == s_methods["CppyyTestData::get_uint_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); - } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint_array2(); + } else if (idx == s_methods["CppyyTestData::get_long_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); - } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_long_array(); + } else if (idx == s_methods["CppyyTestData::get_long_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); - } else if (idx == s_methods["cppyy_test_data::get_ulong_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_long_array2(); + } else if (idx == s_methods["CppyyTestData::get_ulong_array"]) { assert(self && nargs == 0); - result = 
(long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); - } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ulong_array(); + } else if (idx == s_methods["CppyyTestData::get_ulong_array2"]) { + assert(self && nargs == 0); + result = (long)((dummy::CppyyTestData*)self)->get_ulong_array2(); + } else if (idx == s_methods["CppyyTestData::pass_array_short"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_h"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_h"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_h( (*(short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_ushort"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_H"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_H( (*(unsigned short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_int"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(int**)&((CPPYY_G__value*)args)[0])); - } else if 
(idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_i"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_i( (*(int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_uint"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_I"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_I( (*(unsigned int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_long"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_l"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_l( (*(long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_ulong"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned 
long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_L"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_L( (*(unsigned long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_float"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(float**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_f"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_f( (*(float**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_double"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(double**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_d"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_d( (*(double**)&((CPPYY_G__value*)args)[0])); } else { - assert(!"method unknown in cppyy_call_l"); + for (std::map::iterator it = s_methods.begin(); + it != s_methods.end(); ++it) { + if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + } + assert(!"method unknown in 
cppyy_call_l"); } return result; } @@ -680,12 +740,12 @@ long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { long long result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_llong"]) { + if (idx == s_methods["CppyyTestData::get_llong"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_llong(); - } else if (idx == s_methods["cppyy_test_data::get_ullong"]) { + result = ((dummy::CppyyTestData*)self)->get_llong(); + } else if (idx == s_methods["CppyyTestData::get_ullong"]) { assert(self && nargs == 0); - result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + result = (long long)((dummy::CppyyTestData*)self)->get_ullong(); } else { assert(!"method unknown in cppyy_call_ll"); } @@ -695,9 +755,9 @@ float cppyy_call_f(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { float result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_float"]) { + if (idx == s_methods["CppyyTestData::get_float"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_float(); + result = ((dummy::CppyyTestData*)self)->get_float(); } else { assert(!"method unknown in cppyy_call_f"); } @@ -716,15 +776,83 @@ } else if (idx == s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); - } else if (idx == s_methods["cppyy_test_data::get_double"]) { + } else if (idx == s_methods["CppyyTestData::get_double"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_double(); + result = ((dummy::CppyyTestData*)self)->get_double(); } else { assert(!"method unknown in cppyy_call_d"); } return result; } +long double cppyy_call_ld(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + long double result = 0.l; + const long idx = (long)method; + if (idx == s_methods["CppyyTestData::get_ldouble"]) { + assert(self && nargs == 0); + result = 
((dummy::CppyyTestData*)self)->get_ldouble(); + } else { + assert(!"method unknown in cppyy_call_ld"); + } + return result; +} + +void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + void* result = 0; + const long idx = (long)method; + if (idx == s_methods["CppyyTestData::get_char_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_char_cr(); + } else if (idx == s_methods["CppyyTestData::get_uchar_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_uchar_cr(); + } else if (idx == s_methods["CppyyTestData::get_schar_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_schar_cr(); + } else if (idx == s_methods["CppyyTestData::get_short_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_short_cr(); + } else if (idx == s_methods["CppyyTestData::get_ushort_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_ushort_cr(); + } else if (idx == s_methods["CppyyTestData::get_int_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_int_cr(); + } else if (idx == s_methods["CppyyTestData::get_uint_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_uint_cr(); + } else if (idx == s_methods["CppyyTestData::get_long_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_long_cr(); + } else if (idx == s_methods["CppyyTestData::get_ulong_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_ulong_cr(); + } else if (idx == s_methods["CppyyTestData::get_llong_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_llong_cr(); + } else if (idx == s_methods["CppyyTestData::get_ullong_cr"]) { + assert(self && nargs == 0); + result = 
(void*)&((dummy::CppyyTestData*)self)->get_ullong_cr(); + } else if (idx == s_methods["CppyyTestData::get_float_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_float_cr(); + } else if (idx == s_methods["CppyyTestData::get_double_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_double_cr(); + } else if (idx == s_methods["CppyyTestData::get_ldouble_cr"]) { + assert(self && nargs == 0); + result = (void*)&((dummy::CppyyTestData*)self)->get_ldouble_cr(); + } else { + for (std::map::iterator it = s_methods.begin(); + it != s_methods.end(); ++it) { + if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + } + assert(!"method unknown in cppyy_call_r"); + } + return result; +} + + char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char* result = 0; const long idx = (long)method; @@ -750,9 +878,9 @@ assert(nargs == 0 || nargs == 1); if (nargs == 0) result = new dummy::payload; else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); - } else if (idx == s_methods["cppyy_test_data::cppyy_test_data"]) { + } else if (idx == s_methods["CppyyTestData::CppyyTestData"]) { assert(nargs == 0); - result = new dummy::cppyy_test_data; + result = new dummy::CppyyTestData; } else { assert(!"method unknown in cppyy_constructor"); } @@ -792,7 +920,9 @@ return 0; } -int cppyy_is_enum(const char* /* type_name */) { +int cppyy_is_enum(const char* type_name) { + if (strcmp(type_name, "CppyyTestData::EWhat") == 0) + return 1; return 0; } diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -67,7 +67,7 @@ char* cppyy_resolve_name(const char* cppitem_name) { Reflex::Scope s = Reflex::Scope::ByName(cppitem_name); if (s.IsEnum()) - return cppstring_to_cstring("unsigned int"); + return 
cppstring_to_cstring(cppitem_name); const std::string& name = s.Name(Reflex::SCOPED|Reflex::QUALIFIED|Reflex::FINAL); if (name.empty()) return cppstring_to_cstring(cppitem_name); @@ -165,6 +165,10 @@ return cppyy_call_T(method, self, nargs, args); } +long double cppyy_call_ld(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + return cppyy_call_T(method, self, nargs, args); +} + void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { return (void*)cppyy_call_T(method, self, nargs, args); } diff --git a/pypy/module/cppyy/test/datatypes.cxx b/pypy/module/cppyy/test/datatypes.cxx --- a/pypy/module/cppyy/test/datatypes.cxx +++ b/pypy/module/cppyy/test/datatypes.cxx @@ -2,23 +2,25 @@ //=========================================================================== -cppyy_test_data::cppyy_test_data() : m_owns_arrays(false) +CppyyTestData::CppyyTestData() : m_owns_arrays(false) { - m_bool = false; - m_char = 'a'; - m_uchar = 'c'; - m_short = -11; - m_ushort = 11u; - m_int = -22; - m_uint = 22u; - m_long = -33l; - m_ulong = 33ul; - m_llong = -44ll; - m_ullong = 55ull; - m_float = -66.f; - m_double = -77.; - m_enum = kNothing; - m_voidp = (void*)0; + m_bool = false; + m_char = 'a'; + m_schar = 'b'; + m_uchar = 'c'; + m_short = -11; + m_ushort = 11u; + m_int = -22; + m_uint = 22u; + m_long = -33l; + m_ulong = 33ul; + m_llong = -44ll; + m_ullong = 44ull; + m_float = -66.f; + m_double = -77.; + m_ldouble = -88.l; + m_enum = kNothing; + m_voidp = (void*)0; m_bool_array2 = new bool[N]; m_short_array2 = new short[N]; @@ -61,12 +63,12 @@ m_ppod = &m_pod; }; -cppyy_test_data::~cppyy_test_data() +CppyyTestData::~CppyyTestData() { destroy_arrays(); } -void cppyy_test_data::destroy_arrays() { +void CppyyTestData::destroy_arrays() { if (m_owns_arrays == true) { delete[] m_bool_array2; delete[] m_short_array2; @@ -84,134 +86,168 @@ } //- getters ----------------------------------------------------------------- -bool 
cppyy_test_data::get_bool() { return m_bool; } -char cppyy_test_data::get_char() { return m_char; } -unsigned char cppyy_test_data::get_uchar() { return m_uchar; } -short cppyy_test_data::get_short() { return m_short; } -unsigned short cppyy_test_data::get_ushort() { return m_ushort; } -int cppyy_test_data::get_int() { return m_int; } -unsigned int cppyy_test_data::get_uint() { return m_uint; } -long cppyy_test_data::get_long() { return m_long; } -unsigned long cppyy_test_data::get_ulong() { return m_ulong; } -long long cppyy_test_data::get_llong() { return m_llong; } -unsigned long long cppyy_test_data::get_ullong() { return m_ullong; } -float cppyy_test_data::get_float() { return m_float; } -double cppyy_test_data::get_double() { return m_double; } -cppyy_test_data::what cppyy_test_data::get_enum() { return m_enum; } -void* cppyy_test_data::get_voidp() { return m_voidp; } +bool CppyyTestData::get_bool() { return m_bool; } +char CppyyTestData::get_char() { return m_char; } +signed char CppyyTestData::get_schar() { return m_schar; } +unsigned char CppyyTestData::get_uchar() { return m_uchar; } +short CppyyTestData::get_short() { return m_short; } +unsigned short CppyyTestData::get_ushort() { return m_ushort; } +int CppyyTestData::get_int() { return m_int; } +unsigned int CppyyTestData::get_uint() { return m_uint; } +long CppyyTestData::get_long() { return m_long; } +unsigned long CppyyTestData::get_ulong() { return m_ulong; } +long long CppyyTestData::get_llong() { return m_llong; } +unsigned long long CppyyTestData::get_ullong() { return m_ullong; } +float CppyyTestData::get_float() { return m_float; } +double CppyyTestData::get_double() { return m_double; } +long double CppyyTestData::get_ldouble() { return m_ldouble; } +CppyyTestData::EWhat CppyyTestData::get_enum() { return m_enum; } +void* CppyyTestData::get_voidp() { return m_voidp; } -bool* cppyy_test_data::get_bool_array() { return m_bool_array; } -bool* cppyy_test_data::get_bool_array2() { return 
m_bool_array2; } -short* cppyy_test_data::get_short_array() { return m_short_array; } -short* cppyy_test_data::get_short_array2() { return m_short_array2; } -unsigned short* cppyy_test_data::get_ushort_array() { return m_ushort_array; } -unsigned short* cppyy_test_data::get_ushort_array2() { return m_ushort_array2; } -int* cppyy_test_data::get_int_array() { return m_int_array; } -int* cppyy_test_data::get_int_array2() { return m_int_array2; } -unsigned int* cppyy_test_data::get_uint_array() { return m_uint_array; } -unsigned int* cppyy_test_data::get_uint_array2() { return m_uint_array2; } -long* cppyy_test_data::get_long_array() { return m_long_array; } -long* cppyy_test_data::get_long_array2() { return m_long_array2; } -unsigned long* cppyy_test_data::get_ulong_array() { return m_ulong_array; } -unsigned long* cppyy_test_data::get_ulong_array2() { return m_ulong_array2; } +bool* CppyyTestData::get_bool_array() { return m_bool_array; } +bool* CppyyTestData::get_bool_array2() { return m_bool_array2; } +short* CppyyTestData::get_short_array() { return m_short_array; } +short* CppyyTestData::get_short_array2() { return m_short_array2; } +unsigned short* CppyyTestData::get_ushort_array() { return m_ushort_array; } +unsigned short* CppyyTestData::get_ushort_array2() { return m_ushort_array2; } +int* CppyyTestData::get_int_array() { return m_int_array; } +int* CppyyTestData::get_int_array2() { return m_int_array2; } +unsigned int* CppyyTestData::get_uint_array() { return m_uint_array; } +unsigned int* CppyyTestData::get_uint_array2() { return m_uint_array2; } +long* CppyyTestData::get_long_array() { return m_long_array; } +long* CppyyTestData::get_long_array2() { return m_long_array2; } +unsigned long* CppyyTestData::get_ulong_array() { return m_ulong_array; } +unsigned long* CppyyTestData::get_ulong_array2() { return m_ulong_array2; } -float* cppyy_test_data::get_float_array() { return m_float_array; } -float* cppyy_test_data::get_float_array2() { return 
m_float_array2; } -double* cppyy_test_data::get_double_array() { return m_double_array; } -double* cppyy_test_data::get_double_array2() { return m_double_array2; } +float* CppyyTestData::get_float_array() { return m_float_array; } +float* CppyyTestData::get_float_array2() { return m_float_array2; } +double* CppyyTestData::get_double_array() { return m_double_array; } +double* CppyyTestData::get_double_array2() { return m_double_array2; } -cppyy_test_pod cppyy_test_data::get_pod_val() { return m_pod; } -cppyy_test_pod* cppyy_test_data::get_pod_val_ptr() { return &m_pod; } -cppyy_test_pod& cppyy_test_data::get_pod_val_ref() { return m_pod; } -cppyy_test_pod*& cppyy_test_data::get_pod_ptrref() { return m_ppod; } +CppyyTestPod CppyyTestData::get_pod_val() { return m_pod; } +CppyyTestPod* CppyyTestData::get_pod_val_ptr() { return &m_pod; } +CppyyTestPod& CppyyTestData::get_pod_val_ref() { return m_pod; } +CppyyTestPod*& CppyyTestData::get_pod_ptrref() { return m_ppod; } -cppyy_test_pod* cppyy_test_data::get_pod_ptr() { return m_ppod; } +CppyyTestPod* CppyyTestData::get_pod_ptr() { return m_ppod; } + +//- getters const-ref ------------------------------------------------------- +const bool& CppyyTestData::get_bool_cr() { return m_bool; } +const char& CppyyTestData::get_char_cr() { return m_char; } +const signed char& CppyyTestData::get_schar_cr() { return m_schar; } +const unsigned char& CppyyTestData::get_uchar_cr() { return m_uchar; } +const short& CppyyTestData::get_short_cr() { return m_short; } +const unsigned short& CppyyTestData::get_ushort_cr() { return m_ushort; } +const int& CppyyTestData::get_int_cr() { return m_int; } +const unsigned int& CppyyTestData::get_uint_cr() { return m_uint; } +const long& CppyyTestData::get_long_cr() { return m_long; } +const unsigned long& CppyyTestData::get_ulong_cr() { return m_ulong; } +const long long& CppyyTestData::get_llong_cr() { return m_llong; } +const unsigned long long& CppyyTestData::get_ullong_cr() { return m_ullong; 
} +const float& CppyyTestData::get_float_cr() { return m_float; } +const double& CppyyTestData::get_double_cr() { return m_double; } +const long double& CppyyTestData::get_ldouble_cr() { return m_ldouble; } +const CppyyTestData::EWhat& CppyyTestData::get_enum_cr() { return m_enum; } //- setters ----------------------------------------------------------------- -void cppyy_test_data::set_bool(bool b) { m_bool = b; } -void cppyy_test_data::set_char(char c) { m_char = c; } -void cppyy_test_data::set_uchar(unsigned char uc) { m_uchar = uc; } -void cppyy_test_data::set_short(short s) { m_short = s; } -void cppyy_test_data::set_short_c(const short& s) { m_short = s; } -void cppyy_test_data::set_ushort(unsigned short us) { m_ushort = us; } -void cppyy_test_data::set_ushort_c(const unsigned short& us) { m_ushort = us; } -void cppyy_test_data::set_int(int i) { m_int = i; } -void cppyy_test_data::set_int_c(const int& i) { m_int = i; } -void cppyy_test_data::set_uint(unsigned int ui) { m_uint = ui; } -void cppyy_test_data::set_uint_c(const unsigned int& ui) { m_uint = ui; } -void cppyy_test_data::set_long(long l) { m_long = l; } -void cppyy_test_data::set_long_c(const long& l) { m_long = l; } -void cppyy_test_data::set_ulong(unsigned long ul) { m_ulong = ul; } -void cppyy_test_data::set_ulong_c(const unsigned long& ul) { m_ulong = ul; } -void cppyy_test_data::set_llong(long long ll) { m_llong = ll; } -void cppyy_test_data::set_llong_c(const long long& ll) { m_llong = ll; } -void cppyy_test_data::set_ullong(unsigned long long ull) { m_ullong = ull; } -void cppyy_test_data::set_ullong_c(const unsigned long long& ull) { m_ullong = ull; } -void cppyy_test_data::set_float(float f) { m_float = f; } -void cppyy_test_data::set_float_c(const float& f) { m_float = f; } -void cppyy_test_data::set_double(double d) { m_double = d; } -void cppyy_test_data::set_double_c(const double& d) { m_double = d; } -void cppyy_test_data::set_enum(what w) { m_enum = w; } -void 
cppyy_test_data::set_voidp(void* p) { m_voidp = p; } +void CppyyTestData::set_bool(bool b) { m_bool = b; } +void CppyyTestData::set_char(char c) { m_char = c; } +void CppyyTestData::set_schar(signed char sc) { m_schar = sc; } +void CppyyTestData::set_uchar(unsigned char uc) { m_uchar = uc; } +void CppyyTestData::set_short(short s) { m_short = s; } +void CppyyTestData::set_ushort(unsigned short us) { m_ushort = us; } +void CppyyTestData::set_int(int i) { m_int = i; } +void CppyyTestData::set_uint(unsigned int ui) { m_uint = ui; } +void CppyyTestData::set_long(long l) { m_long = l; } +void CppyyTestData::set_ulong(unsigned long ul) { m_ulong = ul; } +void CppyyTestData::set_llong(long long ll) { m_llong = ll; } +void CppyyTestData::set_ullong(unsigned long long ull) { m_ullong = ull; } +void CppyyTestData::set_float(float f) { m_float = f; } +void CppyyTestData::set_double(double d) { m_double = d; } +void CppyyTestData::set_ldouble(long double ld) { m_ldouble = ld; } +void CppyyTestData::set_enum(EWhat w) { m_enum = w; } +void CppyyTestData::set_voidp(void* p) { m_voidp = p; } -void cppyy_test_data::set_pod_val(cppyy_test_pod p) { m_pod = p; } -void cppyy_test_data::set_pod_ptr_in(cppyy_test_pod* pp) { m_pod = *pp; } -void cppyy_test_data::set_pod_ptr_out(cppyy_test_pod* pp) { *pp = m_pod; } -void cppyy_test_data::set_pod_ref(const cppyy_test_pod& rp) { m_pod = rp; } -void cppyy_test_data::set_pod_ptrptr_in(cppyy_test_pod** ppp) { m_pod = **ppp; } -void cppyy_test_data::set_pod_void_ptrptr_in(void** pp) { m_pod = **((cppyy_test_pod**)pp); } -void cppyy_test_data::set_pod_ptrptr_out(cppyy_test_pod** ppp) { delete *ppp; *ppp = new cppyy_test_pod(m_pod); } -void cppyy_test_data::set_pod_void_ptrptr_out(void** pp) { delete *((cppyy_test_pod**)pp); - *((cppyy_test_pod**)pp) = new cppyy_test_pod(m_pod); } +void CppyyTestData::set_pod_val(CppyyTestPod p) { m_pod = p; } +void CppyyTestData::set_pod_ptr_in(CppyyTestPod* pp) { m_pod = *pp; } +void 
CppyyTestData::set_pod_ptr_out(CppyyTestPod* pp) { *pp = m_pod; } +void CppyyTestData::set_pod_ref(const CppyyTestPod& rp) { m_pod = rp; } +void CppyyTestData::set_pod_ptrptr_in(CppyyTestPod** ppp) { m_pod = **ppp; } +void CppyyTestData::set_pod_void_ptrptr_in(void** pp) { m_pod = **((CppyyTestPod**)pp); } +void CppyyTestData::set_pod_ptrptr_out(CppyyTestPod** ppp) { delete *ppp; *ppp = new CppyyTestPod(m_pod); } +void CppyyTestData::set_pod_void_ptrptr_out(void** pp) { delete *((CppyyTestPod**)pp); + *((CppyyTestPod**)pp) = new CppyyTestPod(m_pod); } -void cppyy_test_data::set_pod_ptr(cppyy_test_pod* pp) { m_ppod = pp; } +void CppyyTestData::set_pod_ptr(CppyyTestPod* pp) { m_ppod = pp; } + +//- setters const-ref ------------------------------------------------------- +void CppyyTestData::set_bool_cr(const bool& b) { m_bool = b; } +void CppyyTestData::set_char_cr(const char& c) { m_char = c; } +void CppyyTestData::set_schar_cr(const signed char& sc) { m_schar = sc; } +void CppyyTestData::set_uchar_cr(const unsigned char& uc) { m_uchar = uc; } +void CppyyTestData::set_short_cr(const short& s) { m_short = s; } +void CppyyTestData::set_ushort_cr(const unsigned short& us) { m_ushort = us; } +void CppyyTestData::set_int_cr(const int& i) { m_int = i; } +void CppyyTestData::set_uint_cr(const unsigned int& ui) { m_uint = ui; } +void CppyyTestData::set_long_cr(const long& l) { m_long = l; } +void CppyyTestData::set_ulong_cr(const unsigned long& ul) { m_ulong = ul; } +void CppyyTestData::set_llong_cr(const long long& ll) { m_llong = ll; } +void CppyyTestData::set_ullong_cr(const unsigned long long& ull) { m_ullong = ull; } +void CppyyTestData::set_float_cr(const float& f) { m_float = f; } +void CppyyTestData::set_double_cr(const double& d) { m_double = d; } +void CppyyTestData::set_ldouble_cr(const long double& ld) { m_ldouble = ld; } +void CppyyTestData::set_enum_cr(const EWhat& w) { m_enum = w; } //- passers ----------------------------------------------------------------- 
-short* cppyy_test_data::pass_array(short* a) { return a; } -unsigned short* cppyy_test_data::pass_array(unsigned short* a) { return a; } -int* cppyy_test_data::pass_array(int* a) { return a; } -unsigned int* cppyy_test_data::pass_array(unsigned int* a) { return a; } -long* cppyy_test_data::pass_array(long* a) { return a; } -unsigned long* cppyy_test_data::pass_array(unsigned long* a) { return a; } -float* cppyy_test_data::pass_array(float* a) { return a; } -double* cppyy_test_data::pass_array(double* a) { return a; } +short* CppyyTestData::pass_array(short* a) { return a; } +unsigned short* CppyyTestData::pass_array(unsigned short* a) { return a; } +int* CppyyTestData::pass_array(int* a) { return a; } +unsigned int* CppyyTestData::pass_array(unsigned int* a) { return a; } +long* CppyyTestData::pass_array(long* a) { return a; } +unsigned long* CppyyTestData::pass_array(unsigned long* a) { return a; } +float* CppyyTestData::pass_array(float* a) { return a; } +double* CppyyTestData::pass_array(double* a) { return a; } -char cppyy_test_data::s_char = 's'; -unsigned char cppyy_test_data::s_uchar = 'u'; -short cppyy_test_data::s_short = -101; -unsigned short cppyy_test_data::s_ushort = 255u; -int cppyy_test_data::s_int = -202; -unsigned int cppyy_test_data::s_uint = 202u; -long cppyy_test_data::s_long = -303l; -unsigned long cppyy_test_data::s_ulong = 303ul; -long long cppyy_test_data::s_llong = -404ll; -unsigned long long cppyy_test_data::s_ullong = 505ull; -float cppyy_test_data::s_float = -606.f; -double cppyy_test_data::s_double = -707.; -cppyy_test_data::what cppyy_test_data::s_enum = cppyy_test_data::kNothing; -void* cppyy_test_data::s_voidp = (void*)0; +bool CppyyTestData::s_bool = false; +char CppyyTestData::s_char = 'c'; +signed char CppyyTestData::s_schar = 's'; +unsigned char CppyyTestData::s_uchar = 'u'; +short CppyyTestData::s_short = -101; +unsigned short CppyyTestData::s_ushort = 255u; +int CppyyTestData::s_int = -202; +unsigned int CppyyTestData::s_uint 
= 202u; +long CppyyTestData::s_long = -303l; +unsigned long CppyyTestData::s_ulong = 303ul; +long long CppyyTestData::s_llong = -404ll; +unsigned long long CppyyTestData::s_ullong = 404ull; +float CppyyTestData::s_float = -606.f; +double CppyyTestData::s_double = -707.; +long double CppyyTestData::s_ldouble = -808.l; +CppyyTestData::EWhat CppyyTestData::s_enum = CppyyTestData::kNothing; +void* CppyyTestData::s_voidp = (void*)0; //- strings ----------------------------------------------------------------- -const char* cppyy_test_data::get_valid_string(const char* in) { return in; } -const char* cppyy_test_data::get_invalid_string() { return (const char*)0; } +const char* CppyyTestData::get_valid_string(const char* in) { return in; } +const char* CppyyTestData::get_invalid_string() { return (const char*)0; } //= global functions ======================================================== -long get_pod_address(cppyy_test_data& c) +long get_pod_address(CppyyTestData& c) { return (long)&c.m_pod; } -long get_int_address(cppyy_test_data& c) +long get_int_address(CppyyTestData& c) { return (long)&c.m_pod.m_int; } -long get_double_address(cppyy_test_data& c) +long get_double_address(CppyyTestData& c) { return (long)&c.m_pod.m_double; } + //= global variables/pointers =============================================== int g_int = 42; @@ -223,20 +259,20 @@ return g_int; } -cppyy_test_pod* g_pod = (cppyy_test_pod*)0; +CppyyTestPod* g_pod = (CppyyTestPod*)0; -bool is_global_pod(cppyy_test_pod* t) { +bool is_global_pod(CppyyTestPod* t) { return t == g_pod; } -void set_global_pod(cppyy_test_pod* t) { +void set_global_pod(CppyyTestPod* t) { g_pod = t; } -cppyy_test_pod* get_global_pod() { +CppyyTestPod* get_global_pod() { return g_pod; } -cppyy_test_pod* get_null_pod() { - return (cppyy_test_pod*)0; +CppyyTestPod* get_null_pod() { + return (CppyyTestPod*)0; } diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ 
b/pypy/module/cppyy/test/datatypes.h @@ -2,22 +2,22 @@ //=========================================================================== -struct cppyy_test_pod { +struct CppyyTestPod { int m_int; double m_double; }; //=========================================================================== -enum fruit { kApple=78, kBanana=29, kCitrus=34 }; +enum EFruit { kApple=78, kBanana=29, kCitrus=34 }; //=========================================================================== -class four_vector { +class FourVector { public: - four_vector(double x, double y, double z, double t) : + FourVector(double x, double y, double z, double t) : m_cc_called(false), m_x(x), m_y(y), m_z(z), m_t(t) {} - four_vector(const four_vector& s) : + FourVector(const FourVector& s) : m_cc_called(true), m_x(s.m_x), m_y(s.m_y), m_z(s.m_z), m_t(s.m_t) {} double operator[](int i) { @@ -28,7 +28,7 @@ return -1; } - bool operator==(const four_vector& o) { + bool operator==(const FourVector& o) { return (m_x == o.m_x && m_y == o.m_y && m_z == o.m_z && m_t == o.m_t); } @@ -42,13 +42,13 @@ //=========================================================================== -class cppyy_test_data { +class CppyyTestData { public: - cppyy_test_data(); - ~cppyy_test_data(); + CppyyTestData(); + ~CppyyTestData(); // special cases - enum what { kNothing=6, kSomething=111, kLots=42 }; + enum EWhat { kNothing=6, kSomething=111, kLots=42 }; // helper void destroy_arrays(); @@ -56,6 +56,7 @@ // getters bool get_bool(); char get_char(); + signed char get_schar(); unsigned char get_uchar(); short get_short(); unsigned short get_ushort(); @@ -67,7 +68,8 @@ unsigned long long get_ullong(); float get_float(); double get_double(); - what get_enum(); + long double get_ldouble(); + EWhat get_enum(); void* get_voidp(); bool* get_bool_array(); @@ -90,50 +92,78 @@ double* get_double_array(); double* get_double_array2(); - cppyy_test_pod get_pod_val(); // for m_pod - cppyy_test_pod* get_pod_val_ptr(); - cppyy_test_pod& get_pod_val_ref(); 
- cppyy_test_pod*& get_pod_ptrref(); + CppyyTestPod get_pod_val(); // for m_pod + CppyyTestPod* get_pod_val_ptr(); + CppyyTestPod& get_pod_val_ref(); + CppyyTestPod*& get_pod_ptrref(); - cppyy_test_pod* get_pod_ptr(); // for m_ppod + CppyyTestPod* get_pod_ptr(); // for m_ppod + +// getters const-ref + const bool& get_bool_cr(); + const char& get_char_cr(); + const signed char& get_schar_cr(); + const unsigned char& get_uchar_cr(); + const short& get_short_cr(); + const unsigned short& get_ushort_cr(); + const int& get_int_cr(); + const unsigned int& get_uint_cr(); + const long& get_long_cr(); + const unsigned long& get_ulong_cr(); + const long long& get_llong_cr(); + const unsigned long long& get_ullong_cr(); + const float& get_float_cr(); + const double& get_double_cr(); + const long double& get_ldouble_cr(); + const EWhat& get_enum_cr(); // setters - void set_bool(bool b); - void set_char(char c); - void set_uchar(unsigned char uc); - void set_short(short s); - void set_short_c(const short& s); - void set_ushort(unsigned short us); - void set_ushort_c(const unsigned short& us); - void set_int(int i); - void set_int_c(const int& i); - void set_uint(unsigned int ui); - void set_uint_c(const unsigned int& ui); - void set_long(long l); - void set_long_c(const long& l); - void set_llong(long long ll); - void set_llong_c(const long long& ll); - void set_ulong(unsigned long ul); - void set_ulong_c(const unsigned long& ul); - void set_ullong(unsigned long long ll); - void set_ullong_c(const unsigned long long& ll); - void set_float(float f); - void set_float_c(const float& f); - void set_double(double d); - void set_double_c(const double& d); - void set_enum(what w); - void set_voidp(void* p); + void set_bool(bool); + void set_char(char); + void set_schar(signed char); + void set_uchar(unsigned char); + void set_short(short); + void set_ushort(unsigned short); + void set_int(int); + void set_uint(unsigned int); + void set_long(long); + void set_ulong(unsigned long); + 
void set_llong(long long); + void set_ullong(unsigned long long); + void set_float(float); + void set_double(double); + void set_ldouble(long double); + void set_enum(EWhat); + void set_voidp(void*); - void set_pod_val(cppyy_test_pod); // for m_pod - void set_pod_ptr_in(cppyy_test_pod*); - void set_pod_ptr_out(cppyy_test_pod*); - void set_pod_ref(const cppyy_test_pod&); - void set_pod_ptrptr_in(cppyy_test_pod**); + void set_pod_val(CppyyTestPod); // for m_pod + void set_pod_ptr_in(CppyyTestPod*); + void set_pod_ptr_out(CppyyTestPod*); + void set_pod_ref(const CppyyTestPod&); + void set_pod_ptrptr_in(CppyyTestPod**); void set_pod_void_ptrptr_in(void**); - void set_pod_ptrptr_out(cppyy_test_pod**); + void set_pod_ptrptr_out(CppyyTestPod**); void set_pod_void_ptrptr_out(void**); - void set_pod_ptr(cppyy_test_pod*); // for m_ppod + void set_pod_ptr(CppyyTestPod*); // for m_ppod + +// setters const-ref + void set_bool_cr(const bool&); + void set_char_cr(const char&); + void set_schar_cr(const signed char&); + void set_uchar_cr(const unsigned char&); + void set_short_cr(const short&); + void set_ushort_cr(const unsigned short&); + void set_int_cr(const int&); + void set_uint_cr(const unsigned int&); + void set_long_cr(const long&); + void set_ulong_cr(const unsigned long&); + void set_llong_cr(const long long&); + void set_ullong_cr(const unsigned long long&); + void set_float_cr(const float&); + void set_double_cr(const double&); + void set_ldouble_cr(const long double&); + void set_enum_cr(const EWhat&); // passers short* pass_array(short*); @@ -162,6 +192,7 @@ // basic types bool m_bool; char m_char; + signed char m_schar; unsigned char m_uchar; short m_short; unsigned short m_ushort; @@ -173,7 +204,8 @@ unsigned long long m_ullong; float m_float; double m_double; - what m_enum; + long double m_ldouble; + EWhat m_enum; void* m_voidp; // array types @@ -198,11 +230,13 @@ double* m_double_array2; // object types - cppyy_test_pod m_pod; - cppyy_test_pod* m_ppod; + 
CppyyTestPod m_pod; + CppyyTestPod* m_ppod; public: + static bool s_bool; static char s_char; + static signed char s_schar; static unsigned char s_uchar; static short s_short; static unsigned short s_ushort; @@ -214,7 +248,8 @@ static unsigned long long s_ullong; static float s_float; static double s_double; - static what s_enum; + static long double s_ldouble; + static EWhat s_enum; static void* s_voidp; private: @@ -223,9 +258,9 @@ //= global functions ======================================================== -long get_pod_address(cppyy_test_data& c); -long get_int_address(cppyy_test_data& c); -long get_double_address(cppyy_test_data& c); +long get_pod_address(CppyyTestData& c); +long get_int_address(CppyyTestData& c); +long get_double_address(CppyyTestData& c); //= global variables/pointers =============================================== @@ -233,8 +268,8 @@ void set_global_int(int i); int get_global_int(); -extern cppyy_test_pod* g_pod; -bool is_global_pod(cppyy_test_pod* t); -void set_global_pod(cppyy_test_pod* t); -cppyy_test_pod* get_global_pod(); -cppyy_test_pod* get_null_pod(); +extern CppyyTestPod* g_pod; +bool is_global_pod(CppyyTestPod* t); +void set_global_pod(CppyyTestPod* t); +CppyyTestPod* get_global_pod(); +CppyyTestPod* get_null_pod(); diff --git a/pypy/module/cppyy/test/datatypes.xml b/pypy/module/cppyy/test/datatypes.xml --- a/pypy/module/cppyy/test/datatypes.xml +++ b/pypy/module/cppyy/test/datatypes.xml @@ -1,9 +1,10 @@ - - + + + - + diff --git a/pypy/module/cppyy/test/datatypes_LinkDef.h b/pypy/module/cppyy/test/datatypes_LinkDef.h --- a/pypy/module/cppyy/test/datatypes_LinkDef.h +++ b/pypy/module/cppyy/test/datatypes_LinkDef.h @@ -4,20 +4,20 @@ #pragma link off all classes; #pragma link off all functions; -#pragma link C++ struct cppyy_test_pod; -#pragma link C++ class cppyy_test_data; -#pragma link C++ class four_vector; +#pragma link C++ struct CppyyTestPod; +#pragma link C++ class CppyyTestData; +#pragma link C++ class FourVector; -#pragma 
link C++ enum fruit; +#pragma link C++ enum EFruit; -#pragma link C++ function get_pod_address(cppyy_test_data&); -#pragma link C++ function get_int_address(cppyy_test_data&); -#pragma link C++ function get_double_address(cppyy_test_data&); +#pragma link C++ function get_pod_address(CppyyTestData&); +#pragma link C++ function get_int_address(CppyyTestData&); +#pragma link C++ function get_double_address(CppyyTestData&); #pragma link C++ function set_global_int(int); #pragma link C++ function get_global_int(); -#pragma link C++ function is_global_pod(cppyy_test_pod*); -#pragma link C++ function set_global_pod(cppyy_test_pod*); +#pragma link C++ function is_global_pod(CppyyTestPod*); +#pragma link C++ function set_global_pod(CppyyTestPod*); #pragma link C++ function get_global_pod(); #pragma link C++ function get_null_pod(); diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -22,78 +22,72 @@ return cppyy.load_reflection_info(%r)""" % (test_dct, )) def test01_load_reflection_cache(self): - """Test whether loading a refl. 
info twice results in the same object.""" + """Loading reflection info twice should result in the same object""" import cppyy lib2 = cppyy.load_reflection_info(self.test_dct) assert self.datatypes is lib2 def test02_instance_data_read_access(self): - """Test read access to instance public data and verify values""" + """Read access to instance public data and verify values""" import cppyy - cppyy_test_data = cppyy.gbl.cppyy_test_data + CppyyTestData = cppyy.gbl.CppyyTestData - c = cppyy_test_data() - assert isinstance(c, cppyy_test_data) + c = CppyyTestData() + assert isinstance(c, CppyyTestData) # reading boolean type assert c.m_bool == False # reading char types assert c.m_char == 'a' + assert c.m_schar == 'b' assert c.m_uchar == 'c' # reading integer types - assert c.m_short == -11 - assert c.m_ushort == 11 - assert c.m_int == -22 - assert c.m_uint == 22 - assert c.m_long == -33 - assert c.m_ulong == 33 - assert c.m_llong == -44 - assert c.m_ullong == 55 + assert c.m_short == -11; assert c.get_short_cr() == -11 + assert c.m_ushort == 11; assert c.get_ushort_cr() == 11 + assert c.m_int == -22; assert c.get_int_cr() == -22 + assert c.m_uint == 22; assert c.get_uint_cr() == 22 + assert c.m_long == -33; assert c.get_long_cr() == -33 + assert c.m_ulong == 33; assert c.get_ulong_cr() == 33 + assert c.m_llong == -44; assert c.get_llong_cr() == -44 + assert c.m_ullong == 44; assert c.get_ullong_cr() == 44 # reading floating point types - assert round(c.m_float + 66., 5) == 0 - assert round(c.m_double + 77., 8) == 0 + assert round(c.m_float + 66., 5) == 0 + assert round(c.get_float_cr() + 66., 5) == 0 + assert round(c.m_double + 77., 11) == 0 + assert round(c.get_double_cr() + 77., 11) == 0 + assert round(c.m_ldouble + 88., 24) == 0 + assert round(c.get_ldouble_cr() + 88., 24) == 0 - # reding of array types + # reading of enum types + assert c.m_enum == CppyyTestData.kNothing + assert c.m_enum == c.kNothing + + # reading of boolean array for i in range(self.N): - # 
reading of integer array types assert c.m_bool_array[i] == bool(i%2) assert c.get_bool_array()[i] == bool(i%2) assert c.m_bool_array2[i] == bool((i+1)%2) assert c.get_bool_array2()[i] == bool((i+1)%2) - assert c.m_short_array[i] == -1*i - assert c.get_short_array()[i] == -1*i - assert c.m_short_array2[i] == -2*i - assert c.get_short_array2()[i] == -2*i - assert c.m_ushort_array[i] == 3*i - assert c.get_ushort_array()[i] == 3*i - assert c.m_ushort_array2[i] == 4*i - assert c.get_ushort_array2()[i] == 4*i - assert c.m_int_array[i] == -5*i - assert c.get_int_array()[i] == -5*i - assert c.m_int_array2[i] == -6*i - assert c.get_int_array2()[i] == -6*i - assert c.m_uint_array[i] == 7*i - assert c.get_uint_array()[i] == 7*i - assert c.m_uint_array2[i] == 8*i - assert c.get_uint_array2()[i] == 8*i - assert c.m_long_array[i] == -9*i - assert c.get_long_array()[i] == -9*i - assert c.m_long_array2[i] == -10*i - assert c.get_long_array2()[i] == -10*i - assert c.m_ulong_array[i] == 11*i - assert c.get_ulong_array()[i] == 11*i - assert c.m_ulong_array2[i] == 12*i - assert c.get_ulong_array2()[i] == 12*i + # reading of integer array types + names = [ 'short', 'ushort', 'int', 'uint', 'long', 'ulong'] + alpha = [(-1, -2), (3, 4), (-5, -6), (7, 8), (-9, -10), (11, 12)] + for j in range(self.N): + assert getattr(c, 'm_%s_array' % names[i])[i] == alpha[i][0]*i + assert getattr(c, 'get_%s_array' % names[i])()[i] == alpha[i][0]*i + assert getattr(c, 'm_%s_array2' % names[i])[i] == alpha[i][1]*i + assert getattr(c, 'get_%s_array2' % names[i])()[i] == alpha[i][1]*i - assert round(c.m_float_array[i] + 13.*i, 5) == 0 - assert round(c.m_float_array2[i] + 14.*i, 5) == 0 - assert round(c.m_double_array[i] + 15.*i, 8) == 0 - assert round(c.m_double_array2[i] + 16.*i, 8) == 0 + # reading of floating point array types + for k in range(self.N): + assert round(c.m_float_array[k] + 13.*k, 5) == 0 + assert round(c.m_float_array2[k] + 14.*k, 5) == 0 + assert round(c.m_double_array[k] + 15.*k, 8) == 0 
+ assert round(c.m_double_array2[k] + 16.*k, 8) == 0 # out-of-bounds checks raises(IndexError, c.m_short_array.__getitem__, self.N) @@ -106,22 +100,22 @@ raises(IndexError, c.m_double_array.__getitem__, self.N) # can not access an instance member on the class - raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') - raises(ReferenceError, getattr, cppyy_test_data, 'm_int') + raises(ReferenceError, getattr, CppyyTestData, 'm_bool') + raises(ReferenceError, getattr, CppyyTestData, 'm_int') - assert not hasattr(cppyy_test_data, 'm_bool') - assert not hasattr(cppyy_test_data, 'm_int') + assert not hasattr(CppyyTestData, 'm_bool') From noreply at buildbot.pypy.org Tue May 6 10:34:51 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 6 May 2014 10:34:51 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: use cerr instead of cout for error message Message-ID: <20140506083451.6B19D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71318:2c4631ee1b8e Date: 2014-05-05 18:17 -0700 http://bitbucket.org/pypy/pypy/changeset/2c4631ee1b8e/ Log: use cerr instead of cout for error message diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -506,7 +506,7 @@ } else { for (std::map::iterator it = s_methods.begin(); it != s_methods.end(); ++it) { - if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + if (it->second == idx) std::cerr << "MISSING: " << it->first << std::endl; } assert(!"method unknown in cppyy_call_v"); } @@ -730,7 +730,7 @@ } else { for (std::map::iterator it = s_methods.begin(); it != s_methods.end(); ++it) { - if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + if (it->second == idx) std::cerr << "MISSING: " << it->first << std::endl; } assert(!"method unknown in cppyy_call_l"); } @@ -845,7 +845,7 @@ } else { for 
(std::map::iterator it = s_methods.begin(); it != s_methods.end(); ++it) { - if (it->second == idx) std::cout << "MISSING: " << it->first << std::endl; + if (it->second == idx) std::cerr << "MISSING: " << it->first << std::endl; } assert(!"method unknown in cppyy_call_r"); } From noreply at buildbot.pypy.org Tue May 6 10:34:52 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 6 May 2014 10:34:52 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: conform CINT backend Message-ID: <20140506083452.AE72D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71319:79192a821c7f Date: 2014-05-05 18:17 -0700 http://bitbucket.org/pypy/pypy/changeset/79192a821c7f/ Log: conform CINT backend diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -398,7 +398,7 @@ class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): _immutable_fields_ = ['default'] - typecode = '?' 
+ typecode = 'Q' def __init__(self, space, default): if default: @@ -749,7 +749,7 @@ # TODO: this is missing several cases if compound == "&": return _converters['const unsigned int&'](space, default) - return _converters['unsigned int'+compound](space, default) + return _converters['unsigned int'](space, default) # 5) void converter, which fails on use # diff --git a/pypy/module/cppyy/src/cintcwrapper.cxx b/pypy/module/cppyy/src/cintcwrapper.cxx --- a/pypy/module/cppyy/src/cintcwrapper.cxx +++ b/pypy/module/cppyy/src/cintcwrapper.cxx @@ -231,6 +231,11 @@ libp->para[i].type = 'd'; break; } + case 'Q': { + libp->para[i].ref = (long)&libp->para[i].obj.i; + libp->para[i].type = 'q'; + break; + } } } } @@ -473,6 +478,11 @@ return G__double(result); } +long double cppyy_call_ld(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + G__value result = cppyy_call_T(method, self, nargs, args); + return G__Longdouble(result); +} + void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { G__value result = cppyy_call_T(method, self, nargs, args); return (void*)result.ref; diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -1,5 +1,7 @@ import py, os, sys +from pypy.module.cppyy import capi + currpath = py.path.local(__file__).dirpath() test_dct = str(currpath.join("datatypesDict.so")) @@ -17,6 +19,7 @@ def setup_class(cls): cls.w_N = cls.space.wrap(5) # should be imported from the dictionary cls.w_test_dct = cls.space.wrap(test_dct) + cls.w_capi_identity = cls.space.wrap(capi.identify()) cls.w_datatypes = cls.space.appexec([], """(): import cppyy return cppyy.load_reflection_info(%r)""" % (test_dct, )) @@ -196,9 +199,10 @@ c.set_ldouble_cr(0.902); assert round(c.m_ldouble - 0.902, 24) == 0 # enum types - c.m_enum = CppyyTestData.kSomething; assert c.get_enum() == c.kSomething - 
c.set_enum(CppyyTestData.kLots); assert c.m_enum == c.kLots - c.set_enum_cr(CppyyTestData.kLots ); assert c.m_enum == c.kLots + c.m_enum = CppyyTestData.kSomething; assert c.get_enum() == c.kSomething + c.set_enum(CppyyTestData.kLots); assert c.m_enum == c.kLots + if self.capi_identity != 'CINT': # TODO: not understood + c.set_enum_cr(CppyyTestData.kNothing); assert c.m_enum == c.kNothing # arrays; there will be pointer copies, so destroy the current ones c.destroy_arrays() From noreply at buildbot.pypy.org Tue May 6 10:34:53 2014 From: noreply at buildbot.pypy.org (wlav) Date: Tue, 6 May 2014 10:34:53 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: rpython fixes (r_longfloat not being fully supported) Message-ID: <20140506083453.F2BFF1C01DE@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71320:2886bd193622 Date: 2014-05-06 01:33 -0700 http://bitbucket.org/pypy/pypy/changeset/2886bd193622/ Log: rpython fixes (r_longfloat not being fully supported) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -3,8 +3,8 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import r_singlefloat -from rpython.rlib import jit_libffi, rfloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib import jit, jit_libffi, rfloat from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -247,6 +247,10 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = call_local + def to_memory(self, space, w_obj, w_value, offset): + self._is_abstract(space) + + class IntTypeConverterMixin(NumericTypeConverterMixin): _mixin_ = True @@ -330,7 +334,7 @@ class ConstRefCharConverter(ffitypes.typeid(rffi.CHAR), ConstRefCharTypeConverterMixin, TypeConverter): - _immuteable_ = True + 
_immutable_ = True uses_local = True libffitype = jit_libffi.types.pointer @@ -347,7 +351,7 @@ class ConstRefSCharConverter(ffitypes.typeid(rffi.SIGNEDCHAR), ConstRefCharTypeConverterMixin, TypeConverter): - _immuteable_ = True + _immutable_ = True uses_local = True libffitype = jit_libffi.types.pointer @@ -372,7 +376,6 @@ class ConstFloatRefConverter(FloatConverter): _immutable_fields_ = ['libffitype', 'typecode'] - libffitype = jit_libffi.types.pointer typecode = 'F' @@ -391,41 +394,92 @@ class ConstDoubleRefConverter(ConstRefNumericTypeConverterMixin, DoubleConverter): _immutable_fields_ = ['libffitype', 'typecode'] - libffitype = jit_libffi.types.pointer typecode = 'D' class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): - _immutable_fields_ = ['default'] + _immutable_fields_ = ['default', 'typecode'] typecode = 'Q' + @jit.dont_look_inside def __init__(self, space, default): + # TODO: loses precision if default: self.default = rffi.cast(self.c_type, rfloat.rstring_to_float(default)) else: self.default = rffi.cast(self.c_type, 0.) 
def default_argument_libffi(self, space, address): - # suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" from pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible + @jit.dont_look_inside def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) rffiptr = rffi.cast(self.c_ptrtype, address) - # TODO: this looses precision + # TODO: this loses precision, but r_longfloat can not be wrapped return space.wrap(float(rffiptr[0])) + # repeats to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + @jit.dont_look_inside + def convert_argument(self, space, w_obj, address, call_local): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = self.typecode + + @jit.dont_look_inside + def convert_argument_libffi(self, space, w_obj, address, call_local): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self._unwrap_object(space, w_obj) + + @jit.dont_look_inside + def default_argument_libffi(self, space, address): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self.default + + @jit.dont_look_inside + def to_memory(self, space, w_obj, w_value, offset): + address = self._get_raw_address(space, w_obj, offset) + rffiptr = rffi.cast(self.c_ptrtype, address) + rffiptr[0] = self._unwrap_object(space, w_value) + + class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['libffitype'] libffitype = jit_libffi.types.pointer + def convert_argument_libffi(self, space, w_obj, address, call_local): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + def default_argument_libffi(self, space, address): # suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" from 
pypy.module.cppyy.interp_cppyy import FastCallNotPossible raise FastCallNotPossible + @jit.dont_look_inside + def from_memory(self, space, w_obj, w_pycppclass, offset): + address = self._get_raw_address(space, w_obj, offset) + rffiptr = rffi.cast(self.c_ptrtype, address) + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(rffiptr[0])) + + # repeatss to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" + @jit.dont_look_inside + def convert_argument(self, space, w_obj, address, call_local): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = self.typecode + + @jit.dont_look_inside + def default_argument_libffi(self, space, address): + x = rffi.cast(self.c_ptrtype, address) + x[0] = self.default + + class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -127,6 +127,10 @@ _immutable_ = True c_stubcall = staticmethod(capi.c_call_ld) + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(obj)) + @jit.dont_look_inside def execute(self, space, cppmethod, cppthis, num_args, args): result = self.c_stubcall(space, cppmethod, cppthis, num_args, args) @@ -143,6 +147,15 @@ libffitype = jit_libffi.types.pointer @jit.dont_look_inside + def set_item(self, space, w_item): + self.item = self._unwrap_object(space, w_item) + self.do_assign = True + + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(rffi.cast(self.c_type, obj))) + + @jit.dont_look_inside def execute(self, space, cppmethod, cppthis, num_args, args): result 
= capi.c_call_r(space, cppmethod, cppthis, num_args, args) return self._wrap_reference(space, rffi.cast(self.c_ptrtype, result)) diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -208,11 +208,10 @@ c_ptrtype = rffi.LONGDOUBLEP def _unwrap_object(self, space, w_obj): - # TODO: this looses precision - return r_longfloat(space.float_w(w_obj)) + return rffi.cast(rffi.LONGDOUBLE, r_longfloat(space.float_w(w_obj))) def _wrap_object(self, space, obj): - # TODO: this looses precision + # TODO: this loses precision return space.wrap(float(obj)) From noreply at buildbot.pypy.org Tue May 6 11:06:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 11:06:20 +0200 (CEST) Subject: [pypy-commit] cffi default: Precision Message-ID: <20140506090620.7A8121C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1510:f90ae6e5ea6a Date: 2014-05-06 11:05 +0200 http://bitbucket.org/cffi/cffi/changeset/f90ae6e5ea6a/ Log: Precision diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1201,7 +1201,8 @@ ``<_cffi_backend.buffer object>``. This has been fixed. But you should avoid using ``str(buf)``: it now gives inconsistent results between Python 2 and Python 3 (this is similar to how ``str()`` - gives inconsistent results on regular byte strings). + gives inconsistent results on regular byte strings). Use ``buf[:]`` + instead. 
``ffi.typeof("C type" or cdata object)``: return an object of type From noreply at buildbot.pypy.org Tue May 6 11:06:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 11:06:21 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge release-0.8 Message-ID: <20140506090621.AECC91C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1511:0157531d9a16 Date: 2014-05-06 11:05 +0200 http://bitbucket.org/cffi/cffi/changeset/0157531d9a16/ Log: hg merge release-0.8 diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -45,7 +45,7 @@ Installation and Status ======================================================= -Quick installation: +Quick installation (for cpython, cffi is distributed with PyPy): * ``pip install cffi`` @@ -60,10 +60,10 @@ left. It supports CPython 2.6; 2.7; 3.x (tested with 3.2 and 3.3); -and PyPy 2.0 beta2 or later. +and is distrubuted with PyPy 2.0 beta2 or later. Its speed is comparable to ctypes on CPython (a bit faster but a higher -warm-up time). It is already faster on PyPy (1.5x-2x), but not yet +warm-up time). It is already faster than ctypes on PyPy (1.5x-2x), but not yet *much* faster; stay tuned. 
Requirements: From noreply at buildbot.pypy.org Tue May 6 11:08:17 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 6 May 2014 11:08:17 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add dls2014 (technical report for now) Message-ID: <20140506090817.E1C7B1C317E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5245:7fd24a549a9a Date: 2014-05-06 10:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/7fd24a549a9a/ Log: add dls2014 (technical report for now) diff too long, truncating to 2000 out of 2422 lines diff --git a/talk/dls2014/IEEEbib.bst b/talk/dls2014/IEEEbib.bst new file mode 100644 --- /dev/null +++ b/talk/dls2014/IEEEbib.bst @@ -0,0 +1,1023 @@ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% IEEE.bst %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Bibliography Syle file for articles according to IEEE instructions +% balemi at aut.ee.ethz.ch <22-JUN-93> +% modified from unsrt.bib. Contributions by Richard H. Roy + +ENTRY + { address + author + booktitle + chapter + edition + editor + howpublished + institution + journal + key + month + note + number + organization + pages + publisher + school + series + title + type + volume + year + } + {} + { label } + +INTEGERS { output.state before.all mid.sentence after.sentence after.block } + +FUNCTION {init.state.consts} +{ #0 'before.all := + #1 'mid.sentence := + #2 'after.sentence := + #3 'after.block := +} + +STRINGS { s t } + +FUNCTION {output.nonnull} +{ 's := + output.state mid.sentence = + { ", " * write$ } + { output.state after.block = +% next line commented out by rhr and changed to write comma +% { add.period$ write$ + { ", " * write$ + newline$ + "\newblock " write$ + } + { output.state before.all = + 'write$ + { add.period$ " " * write$ } + if$ + } + if$ + mid.sentence 'output.state := + } + if$ + s +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.check} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * 
warning$ } + 'output.nonnull + if$ +} + +FUNCTION {output.bibitem} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + before.all 'output.state := +} + +FUNCTION {fin.entry} +{ add.period$ + write$ + newline$ +} + +% 5/24/89 rhr +% modified fin.entry function - prints note field after body of entry +%FUNCTION {fin.entry} +%{ add.period$ +% note empty$ +% 'write$ +% { "\par\bgroup\parindent=0em " * annote * "\par\egroup " * write$ +% } +% if$ +% newline$ +%} + +FUNCTION {new.block} +{ output.state before.all = + 'skip$ + { after.block 'output.state := } + if$ +} + +% new block without terminating last block with a comma +FUNCTION {new.ncblock} +{ + write$ + newline$ + "\newblock " + before.all 'output.state := +} + +FUNCTION {new.nccont} +{ + write$ + " " + before.all 'output.state := +} + +FUNCTION {new.sentence} +{ output.state after.block = + 'skip$ + { output.state before.all = + 'skip$ + { after.sentence 'output.state := } + if$ + } + if$ +} + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ 'skip$ + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + 'skip$ + if$ +} + +FUNCTION {new.block.checka} +{ empty$ + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.block.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.sentence.checka} +{ empty$ + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {new.sentence.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {field.or.null} +{ duplicate$ empty$ + { pop$ "" } + 'skip$ + if$ +} + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "{\em " swap$ * "}" * } + if$ +} + +FUNCTION {boldface} +{ duplicate$ empty$ + { pop$ "" } + { "{\bf " swap$ * "}" * } + if$ +} + +%FUNCTION {boldface} +%{ 's swap$ := +% s "" = +% { "" } +% { "{\bf " s * "}" * } +% if$ +%} +% +INTEGERS { nameptr namesleft numnames } + +FUNCTION {format.names} +{ 's := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { 
namesleft #0 > } + { s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't := + nameptr #1 > + { namesleft #1 > + { ", " * t * } + { numnames #2 > + { "," * } + 'skip$ + if$ + t "others" = + { " et~al." * } + { " and " * t * } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {format.authors} +{ author empty$ + { "" } + { author format.names } + if$ +} + +FUNCTION {format.editors} +{ editor empty$ + { "" } + { editor format.names + editor num.names$ #1 > + { ", Eds." * } + { ", Ed." * } + if$ + } + if$ +} + +FUNCTION {format.title} +{ title empty$ + { "" } + { "``" title "t" change.case$ * } + if$ +} + +FUNCTION {n.dashify} +{ 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + +FUNCTION {format.date} +{ year empty$ + { month empty$ + { "" } + { "there's a month but no year in " cite$ * warning$ + month + } + if$ + } + { month empty$ + 'year + { month " " * year * } + if$ + } + if$ +} + +% FUNCTION {format.date} +% { year empty$ +% 'year +% { " " year * } +% if$ +% } + +FUNCTION {format.btitle} +{ title emphasize +} + +FUNCTION {tie.or.space.connect} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ * * +} + +FUNCTION {either.or.check} +{ empty$ + 'pop$ + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {format.bvolume} +{ volume empty$ + { "" } + { "vol." 
volume tie.or.space.connect + series empty$ + 'skip$ + { " of " * series emphasize * } + if$ + "volume and number" number either.or.check + } + if$ +} + +FUNCTION {format.number.series} +{ volume empty$ + { number empty$ + { series field.or.null } + { output.state mid.sentence = + { "number" } + { "Number" } + if$ + number tie.or.space.connect + series empty$ + { "there's a number but no series in " cite$ * warning$ } + { " in " * series * } + if$ + } + if$ + } + { "" } + if$ +} + +FUNCTION {format.edition} +{ edition empty$ + { "" } + { output.state mid.sentence = + { edition "l" change.case$ " edition" * } + { edition "t" change.case$ " edition" * } + if$ + } + if$ +} + +INTEGERS { multiresult } + +FUNCTION {multi.page.check} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {format.pages} +{ pages empty$ + { "" } + { pages multi.page.check + { "pp." pages n.dashify tie.or.space.connect } + { "p." pages tie.or.space.connect } + if$ + } + if$ +} + +FUNCTION {format.vol.num.pages} +{ +volume empty$ + {"" } + {"vol. " volume *} +if$ +number empty$ + 'skip$ + {", no. " number * *} +if$ +pages empty$ + 'skip$ + { duplicate$ empty$ + { pop$ format.pages } + { ", pp. 
" * pages n.dashify * } + if$ + } +if$ +} + +%FUNCTION {format.vol.num.pages} +%%boldface added 3/17/87 rhr +%{ volume field.or.null boldface +% number empty$ +% 'skip$ +% { "(" number * ")" * * +% volume empty$ +% { "there's a number but no volume in " cite$ * warning$ } +% 'skip$ +% if$ +% } +% if$ +% pages empty$ +% 'skip$ +% { duplicate$ empty$ +% { pop$ format.pages } +% { ":" * pages n.dashify * } +% if$ +% } +% if$ +%} + +FUNCTION {format.chapter.pages} +{ chapter empty$ + 'format.pages + { type empty$ + { "chapter" } + { type "l" change.case$ } + if$ + chapter tie.or.space.connect + pages empty$ + 'skip$ + { ", " * format.pages * } + if$ + } + if$ +} + +FUNCTION {format.in.ed.booktitle} +{ booktitle empty$ + { "" } + { editor empty$ + { "in " booktitle emphasize * } + { "in " booktitle emphasize * ", " * format.editors * } + if$ + } + if$ +} + +FUNCTION {empty.misc.check} +{ author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ + and and and and and + { "all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + +FUNCTION {format.thesis.type} +{ type empty$ + 'skip$ + { pop$ + type "t" change.case$ + } + if$ +} + +FUNCTION {format.tr.number} +{ type empty$ + { "Tech. {R}ep." } + 'type + if$ + number empty$ + { "t" change.case$ } + { number tie.or.space.connect } + if$ +} + +FUNCTION {format.article.crossref} +{ key empty$ + { journal empty$ + { "need key or journal for " cite$ * " to crossref " * crossref * + warning$ + "" + } + { "In {\em " journal * "\/}" * } + if$ + } + { "In " key * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.crossref.editor} +{ editor #1 "{vv~}{ll}" format.name$ + editor num.names$ duplicate$ + #2 > + { pop$ " et~al." * } + { #2 < + 'skip$ + { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = + { " et~al." 
* } + { " and " * editor #2 "{vv~}{ll}" format.name$ * } + if$ + } + if$ + } + if$ +} + +FUNCTION {format.book.crossref} +{ volume empty$ + { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ + "In " + } + { "vol." volume tie.or.space.connect + " of " * + } + if$ + editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { series empty$ + { "need editor, key, or series for " cite$ * " to crossref " * + crossref * warning$ + "" * + } + { "{\em " * series * "\/}" * } + if$ + } + { key * } + if$ + } + { format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.incoll.inproc.crossref} +{ editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { booktitle empty$ + { "need editor, key, or booktitle for " cite$ * " to crossref " * + crossref * warning$ + "" + } + { "In {\em " booktitle * "\/}" * } + if$ + } + { "In " key * } + if$ + } + { "In " format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {article} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + crossref missing$ + { journal emphasize "journal" output.check + format.vol.num.pages output + format.date "year" output.check + } + { format.article.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {book} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note 
output + fin.entry +} + +FUNCTION {booklet} +{ output.bibitem + format.authors output + new.block + format.title ",''" * "title" output.check + new.nccont + howpublished address new.block.checkb + howpublished output + address output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {inbook} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + format.chapter.pages "chapter and pages" output.check + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { format.chapter.pages "chapter and pages" output.check + new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {incollection} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.chapter.pages output + new.sentence + publisher "publisher" output.check + address output + format.edition output + format.date "year" output.check + } + { format.incoll.inproc.crossref output.nonnull + format.chapter.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {inproceedings} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + address empty$ + { organization publisher new.sentence.checkb + organization output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check 
+ organization output + } + if$ + format.bvolume output + format.number.series output + format.pages output + publisher output + } + { format.incoll.inproc.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {conference} { inproceedings } + +FUNCTION {manual} +{ output.bibitem + author empty$ + { organization empty$ + 'skip$ + { organization output.nonnull + address output + } + if$ + } + { format.authors output.nonnull } + if$ + new.block + format.btitle "title" output.check + author empty$ + { organization empty$ + { address new.block.checka + address output + } + 'skip$ + if$ + } + { organization address new.block.checkb + organization output + address output + } + if$ + format.edition output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {mastersthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + "M.S. thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {misc} +{ output.bibitem + format.authors output + title howpublished new.block.checkb + format.title ",''" * output + new.nccont + howpublished new.block.checka + howpublished output + format.date output + new.block + note output + fin.entry + empty.misc.check +} + +FUNCTION {phdthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.btitle "title" output.check + new.block + "Ph.D. 
thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {proceedings} +{ output.bibitem + editor empty$ + { organization output } + { format.editors output.nonnull } + if$ + new.block + format.btitle "title" output.check + format.bvolume output + format.number.series output + address empty$ + { editor empty$ + { publisher new.sentence.checka } + { organization publisher new.sentence.checkb + organization output + } + if$ + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + editor empty$ + 'skip$ + { organization output } + if$ + publisher output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {techreport} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + format.tr.number output.nonnull + institution "institution" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {unpublished} +{ output.bibitem + format.authors "author" output.check + new.block + format.title ",''" * "title" output.check + new.ncblock + note "note" output.check + format.date output + fin.entry +} + +FUNCTION {default.type} { misc } + +MACRO {jan} {"Jan."} + +MACRO {feb} {"Feb."} + +MACRO {mar} {"Mar."} + +MACRO {apr} {"Apr."} + +MACRO {may} {"May"} + +MACRO {jun} {"June"} + +MACRO {jul} {"July"} + +MACRO {aug} {"Aug."} + +MACRO {sep} {"Sept."} + +MACRO {oct} {"Oct."} + +MACRO {nov} {"Nov."} + +MACRO {dec} {"Dec."} + +MACRO {acmcs} {"ACM Computing Surveys"} + +MACRO {acta} {"Acta Informatica"} + +MACRO {cacm} {"Communications of the ACM"} + +MACRO {ibmjrd} {"IBM Journal of Research and Development"} + +MACRO {ibmsj} {"IBM Systems Journal"} + +MACRO {ieeese} {"IEEE Transactions on Software Engineering"} + +MACRO {ieeetc} {"IEEE Transactions on 
Computers"} + +MACRO {ieeetcad} + {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"} + +MACRO {ipl} {"Information Processing Letters"} + +MACRO {jacm} {"Journal of the ACM"} + +MACRO {jcss} {"Journal of Computer and System Sciences"} + +MACRO {scp} {"Science of Computer Programming"} + +MACRO {sicomp} {"SIAM Journal on Computing"} + +MACRO {tocs} {"ACM Transactions on Computer Systems"} + +MACRO {tods} {"ACM Transactions on Database Systems"} + +MACRO {tog} {"ACM Transactions on Graphics"} + +MACRO {toms} {"ACM Transactions on Mathematical Software"} + +MACRO {toois} {"ACM Transactions on Office Information Systems"} + +MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"} + +MACRO {tcs} {"Theoretical Computer Science"} + +READ + +STRINGS { longest.label } + +INTEGERS { number.label longest.label.width } + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + 'skip$ + if$ +} + +EXECUTE {initialize.longest.label} + +ITERATE {longest.label.pass} + +FUNCTION {begin.bib} +{ preamble$ empty$ + 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label * "}" * write$ newline$ +} + +EXECUTE {begin.bib} + +EXECUTE {init.state.consts} + +ITERATE {call.type$} + +FUNCTION {end.bib} +{ newline$ + "\end{thebibliography}" write$ newline$ +} + +EXECUTE {end.bib} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of IEEE.bst %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + diff --git a/talk/dls2014/Makefile b/talk/dls2014/Makefile new file mode 100644 --- /dev/null +++ b/talk/dls2014/Makefile @@ -0,0 +1,14 @@ +PROJECT=report +TEX=pdflatex +BIBTEX=bibtex +BUILDTEX=$(TEX) $(PROJECT).tex + +all: + $(BUILDTEX) + $(BIBTEX) $(PROJECT) + $(BUILDTEX) + $(BUILDTEX) + + 
+clean: + rm -f *.log *.bak *.aux *.bbl *.blg *.idx *.toc *.out *~ diff --git a/talk/dls2014/bibl_conf.bib b/talk/dls2014/bibl_conf.bib new file mode 100644 --- /dev/null +++ b/talk/dls2014/bibl_conf.bib @@ -0,0 +1,192 @@ +------------------------------------------------------------------------ + + at ARTICLE{Article, + AUTHOR = {}, + TITLE = {}, + JOURNAL = {}, + Volume = {}, + Number = {}, + Pages = {}, + Month = {}, + YEAR = {}, + Note = {}, + summary = {} + } + at BOOK{Book, + AUTHOR = {}, + editor = {}, + TITLE = {}, + PUBLISHER = {}, + YEAR = {}, + Volume = {}, + number = {}, + Series = {}, + Address = {}, + Edition = {}, + Month = {}, + Note = {}, + summary = {} + } + at BOOKLET{Booklet, + TITLE = {}, + Author = {}, + Howpublished = {}, + Address = {}, + Month = {}, + Year = {}, + Note = {}, + summary = {} + } + at INBOOK{Inbook, + AUTHOR = {}, + editor = {}, + TITLE = {}, + CHAPTER = {}, + pages = {}, + PUBLISHER = {}, + YEAR = {}, + Volume = {}, + number = {}, + Series = {}, + Type = {}, + Address = {}, + Edition = {}, + Month = {}, + Note = {}, + summary = {} + } + at INCOLLECTION{Incollection, + AUTHOR = {}, + TITLE = {}, + BOOKTITLE = {}, + PUBLISHER = {}, + YEAR = {}, + Editor = {}, + Volume = {}, + number = {}, + Series = {}, + Type = {}, + Chapter = {}, + Pages = {}, + Address = {}, + Edition = {}, + Month = {}, + Note = {}, + summary = {} + } + at INPROCEEDINGS{Inproceedings, + AUTHOR = {}, + TITLE = {}, + BOOKTITLE = {}, + YEAR = {}, + Editor = {}, + Volume = {}, + number = {}, + Series = {}, + Pages = {}, + Address = {}, + Month = {}, + Organization = {}, + Publisher = {}, + Note = {}, + summary = {} + } + at MANUAL{Manual, + TITLE = {}, + Author = {}, + Organization = {}, + Address = {}, + Edition = {}, + Month = {}, + Year = {}, + Note = {}, + summary = {} + } + at MASTERSTHESIS{Mastersthesis, + AUTHOR = {}, + TITLE = {}, + SCHOOL = {}, + YEAR = {}, + Type = {}, + Address = {}, + Month = {}, + Note = {}, + summary = {} + } + at MISC{Misc, + 
Author = {}, + Title = {}, + Howpublished = {}, + Month = {}, + Year = {}, + Note = {}, + summary = {} + } + at PHDTHESIS{Phdthesis, + AUTHOR = {}, + TITLE = {}, + SCHOOL = {}, + YEAR = {}, + Type = {}, + Address = {}, + Month = {}, + Note = {}, + summary = {} + } + at PROCEEDINGS{Proceedings, + TITLE = {}, + YEAR = {}, + Editor = {}, + Volume = {}, + number = {}, + Series = {}, + Address = {}, + Month = {}, + Organization = {}, + Publisher = {}, + Note = {}, + summary = {} + } + at TECHREPORT{Techreport, + AUTHOR = {}, + TITLE = {}, + INSTITUTION = {}, + YEAR = {}, + Type = {}, + Number = {}, + Address = {}, + Month = {}, + Note = {}, + summary = {} + } + at UNPUBLISHED{Unpublished, + AUTHOR = {}, + TITLE = {}, + NOTE = {}, + Month = {}, + Year = {}, + summary = {} + } + +------------------------------------------------------------------------ + + at BOOK{Higham:98, + AUTHOR = {N.J. Higham}, + TITLE = {Handbook of Writing for Mathematical Sciences}, + PUBLISHER = {SIAM}, + YEAR = {1998} + } + + at Book{Strunk:00, + author = {W. Strunk~Jr. and E.B. White}, + title = {Elements of Style}, + publisher = {Longman}, + year = {2000}, + edition = {4th} +} + + at MISC{Pueschel:10, + Author = {M.~P\"uschel}, + Title = {Benchmarking comments}, + Howpublished = {online:~http://people.inf.ethz.ch/markusp/teaching/263-2300-ETH-spring11/slides/class05.pdf} + } \ No newline at end of file diff --git a/talk/dls2014/mmap pages.pdf b/talk/dls2014/mmap pages.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4b877ac47e2c13c60867014b61c51ede4e761ee4 GIT binary patch [cut] diff --git a/talk/dls2014/page remapping.pdf b/talk/dls2014/page remapping.pdf new file mode 100644 index 0000000000000000000000000000000000000000..071a52617241a8d3067de4338aaa9c1d211c3353 GIT binary patch [cut] diff --git a/talk/dls2014/report.tex b/talk/dls2014/report.tex new file mode 100644 --- /dev/null +++ b/talk/dls2014/report.tex @@ -0,0 +1,905 @@ +%% LyX 2.1.0 created this file. 
For more info, see http://www.lyx.org/. +%% Do not edit unless you really know what you are doing. +\documentclass{article} +\usepackage[T1]{fontenc} +\usepackage[utf8]{inputenc} +\synctex=-1 +\usepackage{color} +\usepackage{amsmath} +\usepackage{amssymb} +\usepackage{fixltx2e} +\usepackage{graphicx} +\usepackage[unicode=true,pdfusetitle, + bookmarks=true,bookmarksnumbered=false,bookmarksopen=false, + breaklinks=false,pdfborder={0 0 1},backref=false,colorlinks=false] + {hyperref} + +\makeatletter +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Textclass specific LaTeX commands. +\usepackage{enumitem} % customizable list environments +\newlength{\lyxlabelwidth} % auxiliary length + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% User specified LaTeX commands. +% IEEE standard conference template; to be used with: +% spconf.sty - LaTeX style file, and +% IEEEbib.bst - IEEE bibliography style file. +% -------------------------------------------------------------------------- + +\usepackage{spconf} +\usepackage{multicol} + +% bold paragraph titles +\newcommand{\mypar}[1]{{\bf #1.}} + +% Title. +% ------ +\title{C7: Fast software transactional memory for dynamic languages} +% +% Single address. +% --------------- +%\name{Markus P\"uschel\thanks{The author thanks Jelena Kovacevic. This paper +%is a modified version of the template she used in her class.}} +%\address{Department of Computer Science\\ ETH Z\"urich\\Z\"urich, Switzerland} + +% For example: +% ------------ +%\address{School\\ +% Department\\ +% Address} +% +% Two addresses (uncomment and modify for two-address case). 
+% ---------------------------------------------------------- +\twoauthors + {Remigius Meier} + {Department of Computer Science\\ + ETH Zürich\\ + Switzerland} + {Armin Rigo} + {www.pypy.org} + +% nice listings +\usepackage{xcolor} +\usepackage{newverbs} + +\usepackage{color} +\definecolor{verylightgray}{rgb}{0.93,0.93,0.93} +\definecolor{darkblue}{rgb}{0.2,0.2,0.6} +\definecolor{commentgreen}{rgb}{0.25,0.5,0.37} +\usepackage{letltxmacro} + +\usepackage{listings} +\makeatletter +\LetLtxMacro{\oldlstinline}{\lstinline} + +\renewcommand\lstinline[1][]{% +\Collectverb{\@@myverb}% +} + +\def\@@myverb#1{% + \begingroup + \fboxsep=0.2em + \colorbox{verylightgray}{\oldlstinline|#1|}% + \endgroup +} +\makeatother + +\makeatother + +\usepackage{listings} +\lstset{backgroundcolor={\color{verylightgray}}, +basicstyle={\scriptsize\ttfamily}, +commentstyle={\ttfamily\color{commentgreen}}, +keywordstyle={\bfseries\color{darkblue}}, +morecomment={[l]{//}}, +morekeywords={foreach,in,def,type,dynamic,Int,Boolean,infer,void,super,if,boolean,int,else,while,do,extends,class,assert,for,switch,case,private,protected,public,const,final,static,interface,new,true,false,null,return}} +\renewcommand{\lstlistingname}{Listing} + +\begin{document} +%\ninept +\maketitle +\begin{abstract} +... +\end{abstract} + +\section{Introduction} + +Dynamic languages like Python, PHP, Ruby, and JavaScript are usually +regarded as very expressive but also very slow. In recent years, the +introduction of just-in-time compilers (JIT) for these languages (e.g. +PyPy, V8, Tracemonkey) started to change this perception by delivering +good performance that enables new applications. However, a parallel +programming model was not part of the design of those languages. Thus, +the reference implementations of e.g. Python and Ruby use a single, +global interpreter lock (GIL) to serialize the execution of code in +threads. 
+
+While this GIL prevents any parallelism from occurring, it also provides
+some useful guarantees. Since this lock is always acquired while executing
+bytecode instructions and it may only be released in-between such
+instructions, it provides perfect isolation and atomicity between
+multiple threads for a series of instructions. Another technology
+that can provide the same guarantees is transactional memory (TM).
+
+There have been several attempts at replacing the GIL with TM. Using
+transactions to enclose multiple bytecode instructions, we can get
+the very same semantics as the GIL while possibly executing several
+transactions in parallel. Furthermore, by exposing these interpreter-level
+transactions to the application in the form of \emph{atomic blocks},
+we give dynamic languages a new synchronization mechanism that avoids
+several of the problems of locks as they are used now.
+
+
+
+Our contributions include:
+\begin{itemize}[noitemsep]
+\item We introduce a new software transactional memory (STM) system that
+performs well even on low numbers of CPUs. It uses a novel combination
+of hardware features and garbage collector (GC) integration in order
+to keep the overhead of STM very low.
+\item This new STM system is used to replace the GIL in Python and is then
+evaluated extensively.
+\item We introduce atomic blocks to the Python language to provide a backwards
+compatible, composable synchronization mechanism for threads.
+\end{itemize}
+
+
+
+\section{Background}
+
+
+\subsection{Transactional Memory}
+
+Transactional memory (TM) is a concurrency control mechanism that
+comes from database systems. Using transactions, we can group a series
+of instructions performing operations on memory and make them happen
+atomically and in complete isolation from other transactions. \emph{Atomicity}
+means that all these instructions in the transaction and their effects
+seem to happen at one, indivisible point in time. Other transactions
+never see the inconsistent state of a partially executed transaction,
+which is called \emph{isolation}.
+
+If we start multiple such transactions in multiple threads, the TM
+system guarantees that the outcome of running the transactions is
+\emph{serializable}, meaning that the outcome is equal to some sequential
+execution of these transactions. Overall, this is exactly what a single
+global lock guarantees while still allowing the TM system to run transactions
+in parallel as an optimization.
+
+
+\subsection{Python}
+
+We implement and evaluate our system for the Python language. For
+the actual implementation, we chose the PyPy interpreter because
+replacing the GIL there with a TM system is just a matter of adding
+a new transformation to the translation process of the interpreter.
+
+Over the years, Python added multiple ways to provide concurrency
+and parallelism to its applications. We want to highlight two of them,
+namely \emph{threading} and \emph{multiprocessing}.
+
+\emph{Threading} employs operating system (OS) threads to provide
+concurrency. It is, however, limited by the GIL and thus does not
+provide parallelism. At this point we should mention that it is indeed
+possible to run external functions written in C instead of Python
+in parallel. Our work focuses on Python itself and ignores this aspect
+as it requires writing in a different language.
+
+The second approach, \emph{multiprocessing}, uses multiple instances
+of the interpreter itself and runs them in separate OS processes.
+Here we actually get parallelism because we have one GIL per interpreter,
+but of course we have the overhead of multiple processes / interpreters
+and also need to exchange data between them explicitly and expensively.
+
+We focus on the \emph{threading} approach. This requires us to remove
+the GIL from our interpreter in order to run code in parallel on multiple
+threads. One approach to this is fine-grained locking instead of a
+single global lock. 
Jython and IronPython are implementations of +this. It requires great care in order to avoid deadlocks, which is +why we follow the TM approach that provides a \emph{direct }replacement +for the GIL. It does not require careful placing of locks in the right +spots. We will compare our work with Jython for evaluation. + + + + +\subsection{Synchronization} + +It is well known that using locks to synchronize multiple threads +is hard. They are non-composable, have overhead, may deadlock, limit +scalability, and overall add a lot of complexity. For a better parallel +programming model for dynamic languages, we want to add another, well-known +synchronization mechanism: \emph{atomic blocks}. + +Atomic blocks are composable, deadlock-free, higher-level and expose +useful atomicity and isolation guarantees to the application for a +series of instructions. An implementation using a GIL would simply +guarantee that the GIL is not released during the execution of the +atomic block. Using TM, we have the same effect by guaranteeing that +all instructions in an atomic block are executed inside a single transaction. + + + +STM, how atomicity \& isolation + +reasons for overhead + + + + +\section{Method} + + +\subsection{Transactional Memory Model} + +In this section, we describe the general model of our TM system. This +should clarify the general semantics using commonly used terms from +the literature. + + +\subsubsection{Conflict Handling} + +Our conflict detection works with \emph{object granularity}. Conceptually, +it is based on \emph{read }and \emph{write sets }of transactions. +Two transactions conflict if they have accessed a common object that +is now in the write set of at least one of them. + +The \emph{concurrency control }works partly \emph{optimistically} +for reading of objects, where conflicts caused by just reading an +object in transactions are detected only when the transaction that +writes the object actually commits. 
For write-write conflicts we are +currently \emph{pessimistic}: Only one transaction may have a certain +object in its write set at any point in time, others trying to write +to it will have to wait or abort. + +We use \emph{lazy version management }to ensure that modifications +by a transaction are not visible to another transaction before the +former commits. + + + + +\subsubsection{Semantics} + +As required for TM systems, we guarantee complete \emph{isolation +}and \emph{atomicity }for transactions at all times. Furthermore, +the isolation provides full \emph{opacity }to always guarantee a consistent +read set. + +We support the notion of \emph{inevitable transactions }that are always +guaranteed to commit. There is always at most one such transaction +running in the system. We use this kind of transaction to provide +\emph{strong isolation} by running non-transactional code in the context +of inevitable transactions and to still provide the \emph{serializability} +of all transaction schedules. + + + + +\subsubsection{Contention Management} + +When a conflict is detected, we perform some simple contention management. +First, inevitable transactions always win. Second, the older transaction +wins. Different schemes are possible. + + +\subsubsection{Software Transactional Memory} + +Generally speaking, the system is fully implemented in software. However, +we exploit some more advanced features of current CPUs, especially +\emph{memory segmentation, virtual memory, }and the 64-bit address +space. + + +\subsection{Implementation} + +In this section, we will present the general idea of how the TM model +is implemented. Especially the aspects of providing isolation and +atomicity, as well as conflict detection are explained. We try to +do this without going into too much detail about the implementation. +The later section \ref{sub:Low-level-Implementation} will discuss +it in more depth. 
+ + +\subsubsection{Memory Segmentation} + +A naive approach to providing complete isolation between threads is +to partition the virtual memory of a process into $N$ segments, one +per thread. Each segment then holds a copy of all the memory available +to the program. Thus, each thread automatically has a private copy +of every object that it can modify in complete isolation from other +threads. + +To get references to objects that are valid in all threads, we will +use the object's offset inside the segment. Since all segments are +copies of each other, the \emph{Segment Offset (SO)} will point to +the private version of an object in all threads/segments. To then +translate this SO to a real virtual memory address when used inside +a thread, we need to add the thread's segment start address to the +SO. The result of this operation is called a \emph{Linear Address +(LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. + +To make this address translation efficient, we use the segment register +$\%gs$. When this register points to a thread's segment start address, +we can instruct the CPU to perform the above translation from a reference +of the form $\%gs{::}SO$ to the right LA on its own. + +In summary, we can use a single SO to reference the same object in +all threads, and it will be translated by the CPU to a LA that always +points to the thread's private version of this object. Thereby, threads +are fully isolated from each other. However, $N$ segments require +$N$-times the memory and modifications on an object need to be propagated +to all segments. + +\begin{figure*}[t] +\begin{centering} +\includegraphics[scale=0.8]{\string"segment addressing\string".pdf} +\par\end{centering} + +\protect\caption{Segment Addressing\label{fig:Segment-Addressing}} + + +\end{figure*} + + + +\subsubsection{Page Sharing} + +In order to eliminate the prohibitive memory requirements of keeping +around $N$ segment copies, we share memory between them. 
The segments +are initially allocated in a single range of virtual memory by a call +to \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8}. +As illustrated in Figure \ref{fig:mmap()-Page-Mapping}, \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} +creates a mapping between a range of virtual memory pages and virtual +file pages. The virtual file pages are then mapped lazily by the kernel +to real physical memory pages. The mapping generated by \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} +is initially linear but can be changed arbitrarily. Especially, we +can remap so that multiple virtual memory pages map to a single virtual +file page. This is what we use to share memory between the segments +since then we also only require one page of physical memory. + +\begin{figure}[h] +\begin{centering} +\includegraphics[scale=0.8]{\string"mmap pages\string".pdf} +\par\end{centering} + +\protect\caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} +\end{figure} + + +As illustrated in Figure \ref{fig:Page-Remapping}, in our initial +configuration (I) all segments are backed by their own range of virtual +file pages. This is the share-nothing configuration. + +We then designate segment 0 to be the \emph{Sharing-Segment}. No thread +gets this segment assigned to it, it simply holds the pages shared +between all threads. So in (II), we remap all virtual pages of the +segments $>0$ to the file pages of our sharing-segment. This is the +fully-shared configuration. + +During runtime, we can then privatize single pages in segments $>0$ +again by remapping single pages as seen in (III). + +Looking back at address translation for object references, we see +now that this is actually a two-step process. First, $\%gs{::}SO$ +gets translated to different linear addresses in different threads +by the CPU. 
Then, depending on the current mapping of virtual pages +to file pages, these LAs can map to a single file page in the sharing-segment, +or to privatized file pages in the corresponding segments. This mapping +is also performed efficiently by the CPU and can easily be done on +every access to an object. + +In summary, $\%gs{::}SO$ is translated efficiently by the CPU to +either a physical memory location which is shared between several +threads/segments, or to a location in memory private to the segment/thread. +This makes the memory segmentation model for isolation memory efficient +again. + +\begin{figure}[h] +\begin{centering} +\includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} +\par\end{centering} + +\protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to +segment 0, fully shared memory configuration. (III) privatize single +pages.\label{fig:Page-Remapping}} +\end{figure} + + + +\subsubsection{Isolation: Copy-On-Write} + +We now use these mechanisms to provide isolation for transactions. +Using write barriers, we implement a \emph{Copy-On-Write (COW) }on +the level of pages. Starting from the initial fully-shared configuration +(Figure \ref{fig:Page-Remapping}, (II)), when we need to modify an +object without other threads seeing the changes immediately, we ensure +that all pages belonging to the object are private to our segment. + +To detect when to privatize pages, we use write barriers before every +write. When the barrier detects that the object is not in a private +page (or any pages that belong to the object), we remap and copy the +pages to the thread's segment. From now on, the translation of $\%gs{::}SO$ +in this particular segment will resolve to the private version of +the object. Note, the SO used to reference the object does not change +during that process. 
+ + + + +\subsubsection{Isolation: Barriers} + +The job of barriers is to ensure complete isolation between transactions +and to register the objects in the read or write set. We insert read +and write barriers before reading or modifying an object except if +we statically know an object to be readable or writable already. +\begin{description} +\item [{Read~Barrier:}] Adds the object to the read set of the current +transaction. Since our two-step address translation automatically +resolves the reference to the private version of the object on every +access anyway, this is not the job of the read barrier anymore. +\item [{Write~Barrier:}] Adds the object to the read and write set of +the current transaction and checks if all pages of the object are +private, doing COW otherwise.\\ +Furthermore, we currently allow only one transaction modifying an +object at a time. To ensure this, we acquire a write lock on the object +and also eagerly check for a write-write conflict at this point. If +there is a conflict, we do some contention management to decide which +transaction has to wait or abort. Eagerly detecting this kind of conflict +is not inherent to our system, future experiments may show that we +want to lift this restriction. +\end{description} + + + +\subsubsection{Atomicity: Commit \& Abort} + +To provide atomicity for a transaction, we want to make changes visible +on commit. We also need to be able to completely abort a transaction +without a trace, like it never happened. +\begin{description} +\item [{Commit:}] If a transaction commits, we synchronize all threads +so that all of them are waiting in a safe point. In the committing +transaction, we go through all objects in the write set and check +if another transaction in a different segment read the same object. 
+Conflicts are resolved again by either the committing or the other +transaction waiting or aborting.\\ +We then push all changes of modified objects in private pages to all +the pages in other segments, including the sharing-segment (segment +0). +\item [{Abort:}] On abort the transaction will forget about all the changes +it has done. All objects in the write set are reset by copying their +previous version from the sharing-segment into the private pages of +the aborting transaction. +\item [{}]~ +\end{description} + +\subsubsection{Summary} + +We provide isolation between transactions by privatizing the pages +of the segments belonging to the threads the transactions run in. +To detect when and which pages need privatization, we use write barriers +that trigger a COW of one or several pages. Conflicts, however, are +detected on the level of objects; based on the concept of read and +write sets. Barriers before reading and writing add objects to the +corresponding set; particularly detecting write-write conflicts eagerly. +On commit, we resolve read-write conflicts and push modifications +to other segments. Aborting transactions simply undo their changes +by copying from the sharing-segment. + + +\subsection{Low-level Implementation\label{sub:Low-level-Implementation}} + +In this section, we will provide details about the actual implementation +of the system and discuss some of the issues that we encountered. + + +\subsubsection{Architecture} + +Our TM system is designed as a library that covers all aspects around +transactions and object management. The library consists of two parts: +(I) It provides a simple interface to starting and committing transactions, +as well as the required read and write barriers. (II) It also includes +a \emph{garbage collector (GC) }that is closely integrated with the +TM part (e.g. it shares the write barrier). 
The close integration +helps in order to know more about the lifetime of an object, as will +be explained in the following sections. + + +\subsubsection{Application Programming Interface\label{sub:Application-Programming-Interfac}} + +\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void stm_start_transaction(tl, jmpbuf) +void stm_commit_transaction() +void stm_read(object_t *obj) +void stm_write(object_t *obj) +object_t *stm_allocate(ssize_t size_rounded) +STM_PUSH_ROOT(tl, obj) +STM_POP_ROOT(tl, obj) +\end{lstlisting} +\inputencoding{utf8} + +\inputencoding{latin9}\lstinline!stm_start_transaction()!\inputencoding{utf8} +starts a transaction. It requires two arguments, the first being a +thread-local data structure and the second a buffer for use by \inputencoding{latin9}\lstinline!setjmp()!\inputencoding{utf8}. +\inputencoding{latin9}\lstinline!stm_commit_transaction()!\inputencoding{utf8} +tries to commit the current transaction. \inputencoding{latin9}\lstinline!stm_read()!\inputencoding{utf8}, +\inputencoding{latin9}\lstinline!stm_write()!\inputencoding{utf8} +perform a read or a write barrier on an object and \inputencoding{latin9}\lstinline!stm_allocate()!\inputencoding{utf8} +allocates a new object with the specified size (must be a multiple +of 16). \inputencoding{latin9}\lstinline!STM_PUSH_ROOT()!\inputencoding{utf8} +and \inputencoding{latin9}\lstinline!STM_POP_ROOT()!\inputencoding{utf8} +push and pop objects on the shadow stack% +\footnote{A stack for pointers to GC objects that allows for precise garbage +collection. All objects on that stack are never seen as garbage and +are thus always kept alive.% +}. Objects have to be saved using this stack around calls that may +cause a GC cycle to happen, and also while there is no transaction +running. 
In this simplified API, only \inputencoding{latin9}\lstinline!stm_allocate()!\inputencoding{utf8} +and \inputencoding{latin9}\lstinline!stm_commit_transaction()!\inputencoding{utf8} +require saving object references. + +The type \inputencoding{latin9}\lstinline!object_t!\inputencoding{utf8} +is special as it causes the compiler% +\footnote{Clang 3.5 with some patches to this address-space 256 feature% +} to make all accesses through it relative to the $\%gs$ register. +With exceptions, nearly all accesses to objects managed by the TM +system should use this type so that the CPU will translate the reference +to the right version of the object. + + +\subsubsection{Setup\label{sub:Setup}} + +On startup, we reserve a big range of virtual memory with a call to +\inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} and +partition this space into $N+1$ segments. We want to run $N$ threads +in parallel while segment 0 is designated as the \emph{sharing-segment +}that is never assigned to a thread. + +The next step involves using \inputencoding{latin9}\lstinline!remap_file_pages()!\inputencoding{utf8}, +a Linux system call, to establish the fully-shared configuration. +All pages of segments $>0$ map to the pages of the sharing-segment. + +However, the layout of a segment is not uniform and we actually privatize +a few areas again right away. These areas are illustrated in Figure +\ref{fig:Segment-Layout} and explained here: +\begin{description}[noitemsep] +\item [{NULL~page:}] This page is unmapped and will produce a segmentation +violation when accessed. We use this to detect erroneous dereferencing +of \inputencoding{latin9}\lstinline!NULL!\inputencoding{utf8} references. +All $\%gs{::}SO$ translated to linear addresses will point to NULL +pages if SO is set to \inputencoding{latin9}\lstinline!NULL!\inputencoding{utf8}. +\item [{Segment-local~data:}] Some area private to the segment that contains +segment-local information. 
+\item [{Read~markers:}] These are pages that store information about which +objects were read in the current transaction running in this segment. +\item [{Nursery:}] This area contains all the freshly allocated objects +(\emph{young objects}) of the current transaction. The GC uses pointer-bump +allocation in this area to allocate objects in the first generation. +\item [{Old~object~space:}] These pages are the ones that are really +shared between segments. They mostly contain old objects but also +some young ones that were too big to allocate in the nursery. +\end{description} +\begin{figure*}[t] +\begin{centering} +\includegraphics[scale=0.8]{\string"segment layout\string".pdf} +\par\end{centering} + +\protect\caption{Segment Layout\label{fig:Segment-Layout}} +\end{figure*} + + + + + +\subsubsection{Assigning Segments} + +From the above setup it is clear that the number of segments is statically +set to some $N$. That means that at any point in time, a maximum +of $N$ threads and their transactions can be running in parallel. +To support an unlimited number of threads in applications that use +this TM system, we assign segments dynamically to threads. + +At the start of a transaction, the thread it is running in acquires +a segment. It may have to wait until another thread finishes its transaction +and releases a segment. Fairness is not guaranteed yet, as we simply +assume a fair scheduling policy in the operating system when waiting +on a condition variable. + +Therefore, a thread may be assigned to different segments each time +it starts a transaction. Although, we try to assign it the same segment +again if possible. And a maximum of $N$ transactions may run in parallel. + + + + +\subsubsection{Garbage Collection} + +Garbage collection plays a big role in our TM system. The GC is generational +and has two generations. 
+ +The \textbf{first generation}, where objects are considered to be +\emph{young }and reside in the \emph{Nursery}, is collected by \emph{minor +collections}. These collections move the surviving objects out of +the nursery into the old object space, which can be done without stopping +other threads. This is done either if the nursery has no space left +anymore or if we are committing the current transaction. Consequently, +all objects are old and the nursery empty after a transaction commits. +Furthermore, all objects in the nursery were always created in the +current transaction. This fact is useful since we do not need to call +any barrier on this kind of objects. + +To improve this situation even more, we introduce the concept of \emph{overflow +objects}. If a minor collection needs to occur during a transaction, +we empty the nursery and mark each surviving object in the old object +space with an \inputencoding{latin9}\lstinline!overflow_number!\inputencoding{utf8} +globally unique to the current transaction. That way we can still +detect in a medium-fast path inside barriers that the object still +belongs to the current transaction. + +The \textbf{second generation}, where objects are considered to be +\emph{old }and never move again, is collected by \emph{major collections}. +These collections are implemented in a stop-the-world kind of way +and first force minor collections in all threads. The major goal is +to free objects in the old objects space. Furthermore, we optimistically +re-share pages that do not need to be private anymore. + +As seen in the API (section \ref{sub:Application-Programming-Interfac}), +we use a \emph{shadow stack }in order to provide precise garbage collection. +Any time we call a function that possibly triggers a collection, we +need to save the objects that we need afterwards on the shadow stack +using \inputencoding{latin9}\lstinline!STM_PUSH_ROOT()!\inputencoding{utf8}. +That way, they will not be freed. 
And in case they were young, we +get their new location in the old object space when getting them back +from the stack using \inputencoding{latin9}\lstinline!STM_POP_ROOT()!\inputencoding{utf8}. + + + + +\subsubsection{Read Barrier} + +The point of the read barrier is to add the object to the read set +of the transaction. This information is needed to detect conflicts +between transactions. Usually, it also resolves an object reference +to a private copy, but since the CPU performs our address translation +on every object access efficiently, we do not need to do that in our +barrier. + +To add the object to the read set, for us it is enough to mark it +as read. Since this information needs to be local to the segment, +we need to store it in private pages. The area is called \emph{read +markers }and already mentioned in section \ref{sub:Setup}. This area +can be seen as a continuous array of bytes that is indexed from the +start of the segment by an object's reference ($SO$) divided by 16 +(this requires objects of at least 16 bytes in size). Instead of just +setting the byte to \inputencoding{latin9}\lstinline!true!\inputencoding{utf8} +if the corresponding object was read, we set it to a \inputencoding{latin9}\lstinline!read_version!\inputencoding{utf8} +belonging to the transaction, which will be incremented on each commit. +Thereby, we can avoid resetting the bytes to \inputencoding{latin9}\lstinline!false!\inputencoding{utf8} +on commit and only need to do this every 255 transactions. 
The whole +code for the barrier is easily optimizable for compilers as well as +perfectly predictable for CPUs: + +\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void stm_read(SO): + *(SO >> 4) = read_version +\end{lstlisting} +\inputencoding{utf8} + + +\subsubsection{Write Barrier} + +The job of the write barrier is twofold: first, it serves as a write +barrier for the garbage collector and second, it supports copy-on-write +and adds objects to the write set of the transaction. + +The \textbf{fast path} of the write barrier is very simple. We only +need to check for the flag \inputencoding{latin9}\lstinline!WRITE_BARRIER!\inputencoding{utf8} +in the object's header and call the slow path if it is set. This flag +is set either if the object is old and comes from an earlier transaction, +or if there was a minor collection which will add the flag again on +all objects. It is never set on freshly allocated objects that still +reside in the nursery. 
+ +\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void stm_write(SO): + if SO->flags & WRITE_BARRIER: + write_slowpath(SO) +\end{lstlisting} +\inputencoding{utf8} + +The \textbf{slow path} is shown here: + +\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void write_slowpath(SO): + // GC part: + list_append(to_trace, SO) + if is_overflow_obj(SO): + SO->flags &= ~WRITE_BARRIER + return + // STM part + stm_read(SO) + lock_idx = SO >> 4 + retry: + if write_locks[lock_idx] == our_num: + // we already own it + else if write_locks[lock_idx] == 0: + if cmp_and_swap(&write_locks[lock_idx], + 0, our_num): + list_append(modified_old_objects, SO) + privatize_pages(SO) + else: + goto retry + else: + w_w_contention_management() + goto retry + SO->flags &= ~WRITE_BARRIER +\end{lstlisting} +\inputencoding{utf8} + From noreply at buildbot.pypy.org Tue May 6 11:08:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 6 May 2014 11:08:19 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fix some formatting Message-ID: <20140506090819.37D8A1C317E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5246:9b23a18a2d8e Date: 2014-05-06 11:06 +0200 http://bitbucket.org/pypy/extradoc/changeset/9b23a18a2d8e/ Log: fix some formatting diff --git a/talk/dls2014/report.tex b/talk/dls2014/report.tex --- a/talk/dls2014/report.tex +++ b/talk/dls2014/report.tex @@ -17,7 +17,7 @@ \makeatletter %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Textclass specific LaTeX commands. \usepackage{enumitem} % customizable list environments -\newlength{\lyxlabelwidth} % auxiliary length +\newlength{\lyxlabelwidth} % auxiliary length %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% User specified LaTeX commands. 
% IEEE standard conference template; to be used with: @@ -31,6 +31,14 @@ % bold paragraph titles \newcommand{\mypar}[1]{{\bf #1.}} +\newcommand{\mynote}[2]{% + \textcolor{red}{% + \fbox{\bfseries\sffamily\scriptsize#1}% + {\small$\blacktriangleright$\textsf{\emph{#2}}$\blacktriangleleft$}% + }% +} +\newcommand\remi[1]{\mynote{Remi}{#1}} + % Title. % ------ \title{C7: Fast software transactional memory for dynamic languages} @@ -38,7 +46,7 @@ % Single address. % --------------- %\name{Markus P\"uschel\thanks{The author thanks Jelena Kovacevic. This paper -%is a modified version of the template she used in her class.}} +%is a modified version of the template she used in her class.}} %\address{Department of Computer Science\\ ETH Z\"urich\\Z\"urich, Switzerland} % For example: @@ -50,12 +58,13 @@ % Two addresses (uncomment and modify for two-address case). % ---------------------------------------------------------- \twoauthors - {Remigius Meier} - {Department of Computer Science\\ - ETH Zürich\\ - Switzerland} - {Armin Rigo} - {www.pypy.org} + {Remigius Meier} + {Department of Computer Science\\ + ETH Zürich, Switzerland\\ + \nolinkurl{remi.meier at inf.ethz.ch}} + {Armin Rigo} + {www.pypy.org\\ + \nolinkurl{arigo at tunes.org}} % nice listings \usepackage{xcolor} @@ -72,7 +81,7 @@ \LetLtxMacro{\oldlstinline}{\lstinline} \renewcommand\lstinline[1][]{% -\Collectverb{\@@myverb}% + \Collectverb{\@@myverb}% } \def\@@myverb#1{% @@ -83,22 +92,27 @@ } \makeatother -\makeatother + \usepackage{listings} + \lstset{backgroundcolor={\color{verylightgray}}, -basicstyle={\scriptsize\ttfamily}, -commentstyle={\ttfamily\color{commentgreen}}, -keywordstyle={\bfseries\color{darkblue}}, -morecomment={[l]{//}}, -morekeywords={foreach,in,def,type,dynamic,Int,Boolean,infer,void,super,if,boolean,int,else,while,do,extends,class,assert,for,switch,case,private,protected,public,const,final,static,interface,new,true,false,null,return}} + basicstyle={\scriptsize\ttfamily}, + 
commentstyle={\ttfamily\color{commentgreen}}, + keywordstyle={\bfseries\color{darkblue}}, + morecomment={[l]{//}}, + morekeywords={foreach,in,def,type,dynamic,Int, + Boolean,infer,void,super,if,boolean,int,else, + while,do,extends,class,assert,for,switch,case, + private,protected,public,const,final,static, + interface,new,true,false,null,return}} \renewcommand{\lstlistingname}{Listing} \begin{document} -%\ninept -\maketitle +\ninept +\maketitle \begin{abstract} -... + ... \end{abstract} \section{Introduction} @@ -111,35 +125,36 @@ programming model was not part of the design of those languages. Thus, the reference implementations of e.g. Python and Ruby use a single, global interpreter lock (GIL) to serialize the execution of code in -threads. +threads. -While this GIL prevents any parallelism from occurring, it also provides -some useful guarantees. Since this lock is always acquired while executing -bytecode instructions and it may only be released in-between such -instructions, it provides perfect isolation and atomicity between -multiple threads for a series of instructions. Another technology -that can provide the same guarantees is transactional memory (TM). +While this GIL prevents any parallelism from occurring, it also +provides some useful guarantees. Since this lock is always acquired +while executing bytecode instructions and it may only be released +in-between such instructions, it provides perfect isolation and +atomicity between multiple threads for a series of +instructions. Another technology that can provide the same guarantees +is transactional memory (TM). There have been several attempts at replacing the GIL with TM. Using -transactions to enclose multiple bytecode instructions, we can get -the very same semantics as the GIL while possibly executing several -transactions in parallel. 
Furthermore, by exposing these interpreter-level -transactions to the application in the form of \emph{atomic blocks}, -we give dynamic languages a new synchronization mechanism that avoids -several of the problems of locks as they are used now. - - +transactions to enclose multiple bytecode instructions, we can get the +very same semantics as the GIL while possibly executing several +transactions in parallel. Furthermore, by exposing these +interpreter-level transactions to the application in the form of +\emph{atomic blocks}, we give dynamic languages a new synchronization +mechanism that avoids several of the problems of locks as they are +used now. Our contributions include: \begin{itemize}[noitemsep] -\item We introduce a new software transactional memory (STM) system that -performs well even on low numbers of CPUs. It uses a novel combination -of hardware features and garbage collector (GC) integration in order -to keep the overhead of STM very low. -\item This new STM system is used to replace the GIL in Python and is then -evaluated extensively. -\item We introduce atomic blocks to the Python language to provide a backwards -compatible, composable synchronization mechanism for threads. +\item We introduce a new software transactional memory (STM) system + that performs well even on low numbers of CPUs. It uses a novel + combination of hardware features and garbage collector (GC) + integration in order to keep the overhead of STM very low. +\item This new STM system is used to replace the GIL in Python and is + then evaluated extensively. +\item We introduce atomic blocks to the Python language to provide a + backwards compatible, composable synchronization mechanism for + threads. \end{itemize} @@ -152,83 +167,77 @@ Transactional memory (TM) is a concurrency control mechanism that comes from database systems. 
Using transactions, we can group a series of instructions performing operations on memory and make them happen -atomically and in complete isolations from other transactions. \emph{Atomicity} -means that all these instructions in the transaction and their effects -seem to happen at one, undividable point in time. Other transactions -never see inconsistent state of a partially executed transaction which -is called \emph{isolation}. +atomically and in complete isolation from other +transactions. \emph{Atomicity} means that all these instructions in +the transaction and their effects seem to happen at one, undividable +point in time. Other transactions never see inconsistent state of a +partially executed transaction which is called \emph{isolation}. If we start multiple such transactions in multiple threads, the TM system guarantees that the outcome of running the transactions is \emph{serializable}. Meaning, the outcome is equal to some sequential -execution of these transactions. Overall, this is exactly what a single -global lock guarantees while still allowing the TM system to run transactions -in parallel as an optimization. +execution of these transactions. Overall, this is exactly what a +single global lock guarantees while still allowing the TM system to +run transactions in parallel as an optimization. \subsection{Python} -We implement and evaluate our system for the Python language. For -the actual implementation, we chose the PyPy interpreter because -replacing the GIL there with a TM system is just a matter of adding -a new transformation to the translation process of the interpreter. +We implement and evaluate our system for the Python language. For the +actual implementation, we chose the PyPy interpreter because replacing +the GIL there with a TM system is just a matter of adding a new +transformation to the translation process of the interpreter. -Over the years, Python added multiple ways to provide concurrency -and parallelism to its applications. 
We want to highlight two of them, -namely \emph{threading }and \emph{multiprocessing}. +Over the years, Python added multiple ways to provide concurrency and +parallelism to its applications. We want to highlight two of them, +namely \emph{threading }and \emph{multiprocessing}. \emph{Threading} employs operating system (OS) threads to provide concurrency. It is, however, limited by the GIL and thus does not provide parallelism. At this point we should mention that it is indeed -possible to run external functions written in C instead of Python -in parallel. Our work focuses on Python itself and ignores this aspect -as it requires writing in a different language. +possible to run external functions written in C instead of Python in +parallel. Our work focuses on Python itself and ignores this aspect as +it requires writing in a different language. The second approach, \emph{multiprocessing}, uses multiple instances of the interpreter itself and runs them in separate OS processes. -Here we actually get parallelism because we have one GIL per interpreter, -but of course we have the overhead of multiple processes / interpreters -and also need to exchange data between them explicitly and expensively. +Here we actually get parallelism because we have one GIL per +interpreter, but of course we have the overhead of multiple processes +/ interpreters and also need to exchange data between them explicitly +and expensively. We focus on the \emph{threading }approach. This requires us to remove -the GIL from our interpreter in order to run code in parallel on multiple -threads. One approach to this is fine-grained locking instead of a -single global lock. Jython and IronPython are implementations of -this. It requires great care in order to avoid deadlocks, which is -why we follow the TM approach that provides a \emph{direct }replacement +the GIL from our interpreter in order to run code in parallel on +multiple threads. 
One approach to this is fine-grained locking instead +of a single global lock. Jython and IronPython are implementations of +this. It requires great care in order to avoid deadlocks, which is why +we follow the TM approach that provides a \emph{direct }replacement for the GIL. It does not require careful placing of locks in the right spots. We will compare our work with Jython for evaluation. - - \subsection{Synchronization} -It is well known that using locks to synchronize multiple threads -is hard. They are non-composable, have overhead, may deadlock, limit -scalability, and overall add a lot of complexity. For a better parallel -programming model for dynamic languages, we want to add another, well-known -synchronization mechanism: \emph{atomic blocks}. +It is well known that using locks to synchronize multiple threads is +hard. They are non-composable, have overhead, may deadlock, limit +scalability, and overall add a lot of complexity. For a better +parallel programming model for dynamic languages, we want to add +another, well-known synchronization mechanism: \emph{atomic blocks}. Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a series of instructions. An implementation using a GIL would simply guarantee that the GIL is not released during the execution of the atomic block. Using TM, we have the same effect by guaranteeing that -all instructions in an atomic block are executed inside a single transaction. +all instructions in an atomic block are executed inside a single +transaction. - -STM, how atomicity \& isolation - -reasons for overhead - - +\remi{STM, how atomicity \& isolation; reasons for overhead} \section{Method} - \subsection{Transactional Memory Model} In this section, we describe the general model of our TM system. This @@ -238,22 +247,23 @@ \subsubsection{Conflict Handling} -Our conflict detection works with \emph{object granularity}. 
Conceptually, -it is based on \emph{read }and \emph{write sets }of transactions. -Two transactions conflict if they have accessed a common object that -is now in the write set of at least one of them. +Our conflict detection works with \emph{object + granularity}. Conceptually, it is based on \emph{read }and +\emph{write sets }of transactions. Two transactions conflict if they +have accessed a common object that is now in the write set of at least +one of them. -The \emph{concurrency control }works partly \emph{optimistically} -for reading of objects, where conflicts caused by just reading an -object in transactions are detected only when the transaction that -writes the object actually commits. For write-write conflicts we are -currently \emph{pessimistic}: Only one transaction may have a certain -object in its write set at any point in time, others trying to write -to it will have to wait or abort. +The \emph{concurrency control }works partly \emph{optimistically} for +reading of objects, where conflicts caused by just reading an object +in transactions are detected only when the transaction that writes the +object actually commits. For write-write conflicts we are currently +\emph{pessimistic}: Only one transaction may have a certain object in +its write set at any point in time, others trying to write to it will +have to wait or abort. -We use \emph{lazy version management }to ensure that modifications -by a transaction are not visible to another transaction before the -former commits. +We use \emph{lazy version management }to ensure that modifications by +a transaction are not visible to another transaction before the former +commits. @@ -277,67 +287,65 @@ \subsubsection{Contention Management} -When a conflict is detected, we perform some simple contention management. -First, inevitable transactions always win. Second, the older transaction -wins. Different schemes are possible. +When a conflict is detected, we perform some simple contention +management. 
First, inevitable transactions always win. Second, the +older transaction wins. Different schemes are possible. \subsubsection{Software Transactional Memory} -Generally speaking, the system is fully implemented in software. However, -we exploit some more advanced features of current CPUs, especially -\emph{memory segmentation, virtual memory, }and the 64-bit address -space. +Generally speaking, the system is fully implemented in +software. However, we exploit some more advanced features of current +CPUs, especially \emph{memory segmentation, virtual memory, }and the +64-bit address space. \subsection{Implementation} In this section, we will present the general idea of how the TM model is implemented. Especially the aspects of providing isolation and -atomicity, as well as conflict detection are explained. We try to -do this without going into too much detail about the implementation. -The later section \ref{sub:Low-level-Implementation} will discuss -it in more depth. +atomicity, as well as conflict detection are explained. We try to do +this without going into too much detail about the implementation. The +later section \ref{sub:Low-level-Implementation} will discuss it in +more depth. \subsubsection{Memory Segmentation} -A naive approach to providing complete isolation between threads is -to partition the virtual memory of a process into $N$ segments, one -per thread. Each segment then holds a copy of all the memory available -to the program. Thus, each thread automatically has a private copy -of every object that it can modify in complete isolation from other +A naive approach to providing complete isolation between threads is to +partition the virtual memory of a process into $N$ segments, one per +thread. Each segment then holds a copy of all the memory available to +the program. Thus, each thread automatically has a private copy of +every object that it can modify in complete isolation from other threads. 
To get references to objects that are valid in all threads, we will use the object's offset inside the segment. Since all segments are -copies of each other, the \emph{Segment Offset (SO)} will point to -the private version of an object in all threads/segments. To then -translate this SO to a real virtual memory address when used inside -a thread, we need to add the thread's segment start address to the +copies of each other, the \emph{Segment Offset (SO)} will point to the +private version of an object in all threads/segments. To then +translate this SO to a real virtual memory address when used inside a +thread, we need to add the thread's segment start address to the SO. The result of this operation is called a \emph{Linear Address -(LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. + (LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. -To make this address translation efficient, we use the segment register -$\%gs$. When this register points to a thread's segment start address, -we can instruct the CPU to perform the above translation from a reference -of the form $\%gs{::}SO$ to the right LA on its own. +To make this address translation efficient, we use the segment +register $\%gs$. When this register points to a thread's segment start +address, we can instruct the CPU to perform the above translation from +a reference of the form $\%gs{::}SO$ to the right LA on its own. -In summary, we can use a single SO to reference the same object in -all threads, and it will be translated by the CPU to a LA that always -points to the thread's private version of this object. Thereby, threads -are fully isolated from each other. However, $N$ segments require -$N$-times the memory and modifications on an object need to be propagated -to all segments. +In summary, we can use a single SO to reference the same object in all +threads, and it will be translated by the CPU to a LA that always +points to the thread's private version of this object. 
Thereby, +threads are fully isolated from each other. However, $N$ segments +require $N$-times the memory and modifications on an object need to be +propagated to all segments. \begin{figure*}[t] -\begin{centering} -\includegraphics[scale=0.8]{\string"segment addressing\string".pdf} -\par\end{centering} + \begin{centering} + \includegraphics[scale=0.8]{\string"segment addressing\string".pdf} + \par\end{centering} -\protect\caption{Segment Addressing\label{fig:Segment-Addressing}} - - + \protect\caption{Segment Addressing\label{fig:Segment-Addressing}} \end{figure*} @@ -347,61 +355,61 @@ In order to eliminate the prohibitive memory requirements of keeping around $N$ segment copies, we share memory between them. The segments are initially allocated in a single range of virtual memory by a call -to \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8}. -As illustrated in Figure \ref{fig:mmap()-Page-Mapping}, \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} -creates a mapping between a range of virtual memory pages and virtual -file pages. The virtual file pages are then mapped lazily by the kernel -to real physical memory pages. The mapping generated by \inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} -is initially linear but can be changed arbitrarily. Especially, we -can remap so that multiple virtual memory pages map to a single virtual +to \lstinline!mmap()!. As illustrated in Figure +\ref{fig:mmap()-Page-Mapping}, \lstinline!mmap()! creates a mapping +between a range of virtual memory pages and virtual file pages. The +virtual file pages are then mapped lazily by the kernel to real +physical memory pages. The mapping generated by \lstinline!mmap()! is +initially linear but can be changed arbitrarily. Especially, we can +remap so that multiple virtual memory pages map to a single virtual file page. This is what we use to share memory between the segments since then we also only require one page of physical memory. 
\begin{figure}[h] -\begin{centering} -\includegraphics[scale=0.8]{\string"mmap pages\string".pdf} -\par\end{centering} + \begin{centering} + \includegraphics[scale=0.8]{\string"mmap pages\string".pdf} + \par\end{centering} -\protect\caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} + \protect\caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} \end{figure} As illustrated in Figure \ref{fig:Page-Remapping}, in our initial -configuration (I) all segments are backed by their own range of virtual -file pages. This is the share-nothing configuration. +configuration (I) all segments are backed by their own range of +virtual file pages. This is the share-nothing configuration. -We then designate segment 0 to be the \emph{Sharing-Segment}. No thread -gets this segment assigned to it, it simply holds the pages shared -between all threads. So in (II), we remap all virtual pages of the -segments $>0$ to the file pages of our sharing-segment. This is the -fully-shared configuration. +We then designate segment 0 to be the \emph{Sharing-Segment}. No +thread gets this segment assigned to it, it simply holds the pages +shared between all threads. So in (II), we remap all virtual pages of +the segments $>0$ to the file pages of our sharing-segment. This is +the fully-shared configuration. During runtime, we can then privatize single pages in segments $>0$ again by remapping single pages as seen in (III). -Looking back at address translation for object references, we see -now that this is actually a two-step process. First, $\%gs{::}SO$ -gets translated to different linear addresses in different threads -by the CPU. Then, depending on the current mapping of virtual pages -to file pages, these LAs can map to a single file page in the sharing-segment, -or to privatized file pages in the corresponding segments. This mapping -is also performed efficiently by the CPU and can easily be done on -every access to an object. 
+Looking back at address translation for object references, we see now +that this is actually a two-step process. First, $\%gs{::}SO$ gets +translated to different linear addresses in different threads by the +CPU. Then, depending on the current mapping of virtual pages to file +pages, these LAs can map to a single file page in the sharing-segment, +or to privatized file pages in the corresponding segments. This +mapping is also performed efficiently by the CPU and can easily be +done on every access to an object. In summary, $\%gs{::}SO$ is translated efficiently by the CPU to either a physical memory location which is shared between several -threads/segments, or to a location in memory private to the segment/thread. -This makes the memory segmentation model for isolation memory efficient -again. +threads/segments, or to a location in memory private to the +segment/thread. This makes the memory segmentation model for +isolation memory efficient again. \begin{figure}[h] -\begin{centering} -\includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} -\par\end{centering} + \begin{centering} + \includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} + \par\end{centering} -\protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to -segment 0, fully shared memory configuration. (III) privatize single -pages.\label{fig:Page-Remapping}} + \protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to + segment 0, fully shared memory configuration. (III) privatize single + pages.\label{fig:Page-Remapping}} \end{figure} @@ -409,8 +417,8 @@ \subsubsection{Isolation: Copy-On-Write} We now use these mechanisms to provide isolation for transactions. -Using write barriers, we implement a \emph{Copy-On-Write (COW) }on -the level of pages. Starting from the initial fully-shared configuration +Using write barriers, we implement a \emph{Copy-On-Write (COW) }on the +level of pages. 
Starting from the initial fully-shared configuration (Figure \ref{fig:Page-Remapping}, (II)), when we need to modify an object without other threads seeing the changes immediately, we ensure that all pages belonging to the object are private to our segment. @@ -418,11 +426,10 @@ To detect when to privatize pages, we use write barriers before every write. When the barrier detects that the object is not in a private page (or any pages that belong to the object), we remap and copy the -pages to the thread's segment. From now on, the translation of $\%gs{::}SO$ -in this particular segment will resolve to the private version of -the object. Note, the SO used to reference the object does not change -during that process. - +pages to the thread's segment. From now on, the translation of +$\%gs{::}SO$ in this particular segment will resolve to the private +version of the object. Note, the SO used to reference the object does +not change during that process. @@ -431,285 +438,285 @@ The job of barriers is to ensure complete isolation between transactions and to register the objects in the read or write set. We insert read and write barriers before reading or modifying an object except if -we statically know an object to be readable or writable already. +we statically know an object to be readable or writable already. \begin{description} \item [{Read~Barrier:}] Adds the object to the read set of the current -transaction. Since our two-step address translation automatically -resolves the reference to the private version of the object on every -access anyway, this is not the job of the read barrier anymore. + transaction. Since our two-step address translation automatically + resolves the reference to the private version of the object on every + access anyway, this is not the job of the read barrier anymore. 
\item [{Write~Barrier:}] Adds the object to the read and write set of -the current transaction and checks if all pages of the object are -private, doing COW otherwise.\\ -Furthermore, we currently allow only one transaction modifying an -object at a time. To ensure this, we acquire a write lock on the object -and also eagerly check for a write-write conflict at this point. If -there is a conflict, we do some contention management to decide which -transaction has to wait or abort. Eagerly detecting this kind of conflict -is not inherent to our system, future experiments may show that we -want to lift this restriction. + the current transaction and checks if all pages of the object are + private, doing COW otherwise.\\ + Furthermore, we currently allow only one transaction modifying an + object at a time. To ensure this, we acquire a write lock on the object + and also eagerly check for a write-write conflict at this point. If + there is a conflict, we do some contention management to decide which + transaction has to wait or abort. Eagerly detecting this kind of conflict + is not inherent to our system, future experiments may show that we + want to lift this restriction. \end{description} \subsubsection{Atomicity: Commit \& Abort} -To provide atomicity for a transaction, we want to make changes visible -on commit. We also need to be able to completely abort a transaction -without a trace, like it never happened. +To provide atomicity for a transaction, we want to make changes +visible on commit. We also need to be able to completely abort a +transaction without a trace, like it never happened. \begin{description} \item [{Commit:}] If a transaction commits, we synchronize all threads -so that all of them are waiting in a safe point. In the committing -transaction, we go through all objects in the write set and check -if another transaction in a different segment read the same object. 
-Conflicts are resolved again by either the committing or the other -transaction waiting or aborting.\\ -We then push all changes of modified objects in private pages to all -the pages in other segments, including the sharing-segment (segment -0). -\item [{Abort:}] On abort the transaction will forget about all the changes -it has done. All objects in the write set are reset by copying their -previous version from the sharing-segment into the private pages of -the aborting transaction. -\item [{}]~ + so that all of them are waiting in a safe point. In the committing + transaction, we go through all objects in the write set and check if + another transaction in a different segment read the same object. + Conflicts are resolved again by either the committing or the other + transaction waiting or aborting.\\ + We then push all changes of modified objects in private pages to all + the pages in other segments, including the sharing-segment (segment + 0). +\item [{Abort:}] On abort the transaction will forget about all the + changes it has done. All objects in the write set are reset by + copying their previous version from the sharing-segment into the + private pages of the aborting transaction. \end{description} + \subsubsection{Summary} -We provide isolation between transactions by privatizing the pages -of the segments belonging to the threads the transactions run in. -To detect when and which pages need privatization, we use write barriers +We provide isolation between transactions by privatizing the pages of +the segments belonging to the threads the transactions run in. To +detect when and which pages need privatization, we use write barriers that trigger a COW of one or several pages. Conflicts, however, are detected on the level of objects; based on the concept of read and write sets. Barriers before reading and writing add objects to the -corresponding set; particularly detecting write-write conflicts eagerly. 
-On commit, we resolve read-write conflicts and push modifications -to other segments. Aborting transactions simply undo their changes -by copying from the sharing-segment. +corresponding set; particularly detecting write-write conflicts +eagerly. On commit, we resolve read-write conflicts and push +modifications to other segments. Aborting transactions simply undo +their changes by copying from the sharing-segment. \subsection{Low-level Implementation\label{sub:Low-level-Implementation}} -In this section, we will provide details about the actual implementation -of the system and discuss some of the issues that we encountered. +In this section, we will provide details about the actual +implementation of the system and discuss some of the issues that we +encountered. \subsubsection{Architecture} Our TM system is designed as a library that covers all aspects around transactions and object management. The library consists of two parts: -(I) It provides a simple interface to starting and committing transactions, -as well as the required read and write barriers. (II) It also includes -a \emph{garbage collector (GC) }that is closely integrated with the -TM part (e.g. it shares the write barrier). The close integration -helps in order to know more about the lifetime of an object, as will -be explained in the following sections. +(I) It provides a simple interface to starting and committing +transactions, as well as the required read and write barriers. (II) It +also includes a \emph{garbage collector (GC) }that is closely +integrated with the TM part (e.g. it shares the write barrier). The +close integration helps in order to know more about the lifetime of an +object, as will be explained in the following sections. 
\subsubsection{Application Programming Interface\label{sub:Application-Programming-Interfac}} -\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] -void stm_start_transaction(tl, jmpbuf) -void stm_commit_transaction() -void stm_read(object_t *obj) -void stm_write(object_t *obj) -object_t *stm_allocate(ssize_t size_rounded) -STM_PUSH_ROOT(tl, obj) -STM_POP_ROOT(tl, obj) +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] + void stm_start_transaction(tl, jmpbuf) + void stm_commit_transaction() + void stm_read(object_t *obj) + void stm_write(object_t *obj) + object_t *stm_allocate(ssize_t size_rounded) + STM_PUSH_ROOT(tl, obj) + STM_POP_ROOT(tl, obj) \end{lstlisting} -\inputencoding{utf8} -\inputencoding{latin9}\lstinline!stm_start_transaction()!\inputencoding{utf8} -starts a transaction. It requires two arguments, the first being a -thread-local data structure and the second a buffer for use by \inputencoding{latin9}\lstinline!setjmp()!\inputencoding{utf8}. -\inputencoding{latin9}\lstinline!stm_commit_transaction()!\inputencoding{utf8} -tries to commit the current transaction. \inputencoding{latin9}\lstinline!stm_read()!\inputencoding{utf8}, -\inputencoding{latin9}\lstinline!stm_write()!\inputencoding{utf8} -perform a read or a write barrier on an object and \inputencoding{latin9}\lstinline!stm_allocate()!\inputencoding{utf8} -allocates a new object with the specified size (must be a multiple -of 16). \inputencoding{latin9}\lstinline!STM_PUSH_ROOT()!\inputencoding{utf8} -and \inputencoding{latin9}\lstinline!STM_POP_ROOT()!\inputencoding{utf8} -push and pop objects on the shadow stack% -\footnote{A stack for pointers to GC objects that allows for precise garbage -collection. All objects on that stack are never seen as garbage and -are thus always kept alive.% -}. Objects have to be saved using this stack around calls that may -cause a GC cycle to happen, and also while there is no transaction -running. 
In this simplified API, only \inputencoding{latin9}\lstinline!stm_allocate()!\inputencoding{utf8} -and \inputencoding{latin9}\lstinline!stm_commit_transaction()!\inputencoding{utf8} + +\lstinline!stm_start_transaction()! starts a transaction. It requires +two arguments, the first being a thread-local data structure and the +second a buffer for use by \lstinline!setjmp()!. +\lstinline!stm_commit_transaction()! tries to commit the current +transaction. \lstinline!stm_read()!, \lstinline!stm_write()! perform +a read or a write barrier on an object and \lstinline!stm_allocate()! +allocates a new object with the specified size (must be a multiple of +16). \lstinline!STM_PUSH_ROOT()! and \lstinline!STM_POP_ROOT()! push +and pop objects on the shadow stack~\footnote{A stack for pointers to + GC objects that allows for precise garbage collection. All objects + on that stack are never seen as garbage and are thus always kept + alive.}. Objects have to be saved using this stack around calls +that may cause a GC cycle to happen, and also while there is no +transaction running. In this simplified API, only +\lstinline!stm_allocate()! and \lstinline!stm_commit_transaction()! require saving object references. -The type \inputencoding{latin9}\lstinline!object_t!\inputencoding{utf8} -is special as it causes the compiler% -\footnote{Clang 3.5 with some patches to this address-space 256 feature% -} to make all accesses through it relative to the $\%gs$ register. -With exceptions, nearly all accesses to objects managed by the TM -system should use this type so that the CPU will translate the reference -to the right version of the object. +The type \lstinline!object_t! is special as it causes the +compiler~\footnote{Clang 3.5 with some patches to this address-space + 256 feature} to make all accesses through it relative to the $\%gs$ +register. 
With exceptions, nearly all accesses to objects managed by +the TM system should use this type so that the CPU will translate the +reference to the right version of the object. \subsubsection{Setup\label{sub:Setup}} On startup, we reserve a big range of virtual memory with a call to -\inputencoding{latin9}\lstinline!mmap()!\inputencoding{utf8} and -partition this space into $N+1$ segments. We want to run $N$ threads -in parallel while segment 0 is designated as the \emph{sharing-segment -}that is never assigned to a thread. +\lstinline!mmap()! and partition this space into $N+1$ segments. We +want to run $N$ threads in parallel while segment 0 is designated as +the \emph{sharing-segment }that is never assigned to a thread. -The next step involves using \inputencoding{latin9}\lstinline!remap_file_pages()!\inputencoding{utf8}, -a Linux system call, to establish the fully-shared configuration. -All pages of segments $>0$ map to the pages of the sharing-segment. +The next step involves using \lstinline!remap_file_pages()!, a Linux +system call, to establish the fully-shared configuration. All pages +of segments $>0$ map to the pages of the sharing-segment. -However, the layout of a segment is not uniform and we actually privatize -a few areas again right away. These areas are illustrated in Figure -\ref{fig:Segment-Layout} and explained here: +However, the layout of a segment is not uniform and we actually +privatize a few areas again right away. These areas are illustrated in +Figure \ref{fig:Segment-Layout} and explained here: \begin{description}[noitemsep] -\item [{NULL~page:}] This page is unmapped and will produce a segmentation -violation when accessed. We use this to detect erroneous dereferencing -of \inputencoding{latin9}\lstinline!NULL!\inputencoding{utf8} references. -All $\%gs{::}SO$ translated to linear addresses will point to NULL -pages if SO is set to \inputencoding{latin9}\lstinline!NULL!\inputencoding{utf8}. 
-\item [{Segment-local~data:}] Some area private to the segment that contains -segment-local information. -\item [{Read~markers:}] These are pages that store information about which -objects were read in the current transaction running in this segment. -\item [{Nursery:}] This area contains all the freshly allocated objects -(\emph{young objects}) of the current transaction. The GC uses pointer-bump -allocation in this area to allocate objects in the first generation. +\item [{NULL~page:}] This page is unmapped and will produce a + segmentation violation when accessed. We use this to detect + erroneous dereferencing of \lstinline!NULL! references. All + $\%gs{::}SO$ translated to linear addresses will point to NULL pages + if SO is set to \lstinline!NULL!. +\item [{Segment-local~data:}] Some area private to the segment that + contains segment-local information. +\item [{Read~markers:}] These are pages that store information about + which objects were read in the current transaction running in this + segment. +\item [{Nursery:}] This area contains all the freshly allocated + objects (\emph{young objects}) of the current transaction. The GC + uses pointer-bump allocation in this area to allocate objects in the + first generation. \item [{Old~object~space:}] These pages are the ones that are really -shared between segments. They mostly contain old objects but also -some young ones that were too big to allocate in the nursery. + shared between segments. They mostly contain old objects but also + some young ones that were too big to allocate in the nursery. 
\end{description} + + \begin{figure*}[t] -\begin{centering} -\includegraphics[scale=0.8]{\string"segment layout\string".pdf} -\par\end{centering} + \begin{centering} + \includegraphics[scale=0.8]{\string"segment layout\string".pdf} + \par\end{centering} -\protect\caption{Segment Layout\label{fig:Segment-Layout}} + \protect\caption{Segment Layout\label{fig:Segment-Layout}} \end{figure*} - - \subsubsection{Assigning Segments} -From the above setup it is clear that the number of segments is statically -set to some $N$. That means that at any point in time, a maximum -of $N$ threads and their transactions can be running in parallel. -To support an unlimited number of threads in applications that use -this TM system, we assign segments dynamically to threads. +From the above setup it is clear that the number of segments is +statically set to some $N$. That means that at any point in time, a +maximum of $N$ threads and their transactions can be running in +parallel. To support an unlimited number of threads in applications +that use this TM system, we assign segments dynamically to threads. -At the start of a transaction, the thread it is running in acquires -a segment. It may have to wait until another thread finishes its transaction -and releases a segment. Fairness is not guaranteed yet, as we simply -assume a fair scheduling policy in the operating system when waiting -on a condition variable. +At the start of a transaction, the thread it is running in acquires a +segment. It may have to wait until another thread finishes its +transaction and releases a segment. Fairness is not guaranteed yet, as +we simply assume a fair scheduling policy in the operating system when +waiting on a condition variable. -Therefore, a thread may be assigned to different segments each time -it starts a transaction. Although, we try to assign it the same segment -again if possible. And a maximum of $N$ transactions may run in parallel. 
+Therefore, a thread may be assigned to different segments each time it +starts a transaction. Although, we try to assign it the same segment +again if possible. And a maximum of $N$ transactions may run in +parallel. \subsubsection{Garbage Collection} -Garbage collection plays a big role in our TM system. The GC is generational -and has two generations. +Garbage collection plays a big role in our TM system. The GC is +generational and has two generations. The \textbf{first generation}, where objects are considered to be -\emph{young }and reside in the \emph{Nursery}, is collected by \emph{minor -collections}. These collections move the surviving objects out of -the nursery into the old object space, which can be done without stopping -other threads. This is done either if the nursery has no space left -anymore or if we are committing the current transaction. Consequently, -all objects are old and the nursery empty after a transaction commits. -Furthermore, all objects in the nursery were always created in the -current transaction. This fact is useful since we do not need to call -any barrier on this kind of objects. +\emph{young }and reside in the \emph{Nursery}, is collected by +\emph{minor collections}. These collections move the surviving objects +out of the nursery into the old object space, which can be done +without stopping other threads. This is done either if the nursery has +no space left anymore or if we are committing the current +transaction. Consequently, all objects are old and the nursery empty +after a transaction commits. Furthermore, all objects in the nursery +were always created in the current transaction. This fact is useful +since we do not need to call any barrier on this kind of objects. -To improve this situation even more, we introduce the concept of \emph{overflow -objects}. 
If a minor collection needs to occur during a transaction, -we empty the nursery and mark each surviving object in the old object -space with an \inputencoding{latin9}\lstinline!overflow_number!\inputencoding{utf8} -globally unique to the current transaction. That way we can still -detect in a medium-fast path inside barriers that the object still -belongs to the current transaction. +To improve this situation even more, we introduce the concept of +\emph{overflow objects}. If a minor collection needs to occur during a +transaction, we empty the nursery and mark each surviving object in +the old object space with an \lstinline!overflow_number! globally +unique to the current transaction. That way we can still detect in a +medium-fast path inside barriers that the object still belongs to the +current transaction. The \textbf{second generation}, where objects are considered to be -\emph{old }and never move again, is collected by \emph{major collections}. -These collections are implemented in a stop-the-world kind of way -and first force minor collections in all threads. The major goal is -to free objects in the old objects space. Furthermore, we optimistically -re-share pages that do not need to be private anymore. +\emph{old }and never move again, is collected by \emph{major + collections}. These collections are implemented in a stop-the-world +kind of way and first force minor collections in all threads. The +major goal is to free objects in the old objects space. Furthermore, +we optimistically re-share pages that do not need to be private +anymore. -As seen in the API (section \ref{sub:Application-Programming-Interfac}), -we use a \emph{shadow stack }in order to provide precise garbage collection. -Any time we call a function that possibly triggers a collection, we -need to save the objects that we need afterwards on the shadow stack -using \inputencoding{latin9}\lstinline!STM_PUSH_ROOT()!\inputencoding{utf8}. -That way, they will not be freed. 
And in case they were young, we -get their new location in the old object space when getting them back -from the stack using \inputencoding{latin9}\lstinline!STM_POP_ROOT()!\inputencoding{utf8}. +As seen in the API (section~\ref{sub:Application-Programming-Interfac}), +we use a \emph{shadow stack} in order to provide precise garbage +collection. Any time we call a function that possibly triggers a +collection, we need to save the objects that we need afterwards on the +shadow stack using \lstinline!STM_PUSH_ROOT()!. That way, they will +not be freed. And in case they were young, we get their new location +in the old object space when getting them back from the stack using +\lstinline!STM_POP_ROOT()!. \subsubsection{Read Barrier} -The point of the read barrier is to add the object to the read set -of the transaction. This information is needed to detect conflicts -between transactions. Usually, it also resolves an object reference -to a private copy, but since the CPU performs our address translation -on every object access efficiently, we do not need to do that in our -barrier. +The point of the read barrier is to add the object to the read set of +the transaction. This information is needed to detect conflicts +between transactions. Usually, it also resolves an object reference to +a private copy, but since the CPU performs our address translation on +every object access efficiently, we do not need to do that in our +barrier. -To add the object to the read set, for us it is enough to mark it -as read. Since this information needs to be local to the segment, -we need to store it in private pages. The area is called \emph{read -markers }and already mentioned in section \ref{sub:Setup}. This area -can be seen as a continuous array of bytes that is indexed from the -start of the segment by an object's reference ($SO$) divided by 16 -(this requires objects of at least 16 bytes in size). 
Instead of just -setting the byte to \inputencoding{latin9}\lstinline!true!\inputencoding{utf8} -if the corresponding object was read, we set it to a \inputencoding{latin9}\lstinline!read_version!\inputencoding{utf8} -belonging to the transaction, which will be incremented on each commit. -Thereby, we can avoid resetting the bytes to \inputencoding{latin9}\lstinline!false!\inputencoding{utf8} -on commit and only need to do this every 255 transactions. The whole -code for the barrier is easily optimizable for compilers as well as -perfectly predictable for CPUs: +To add the object to the read set, for us it is enough to mark it as +read. Since this information needs to be local to the segment, we need +to store it in private pages. The area is called \emph{read markers +}and already mentioned in section \ref{sub:Setup}. This area can be +seen as a continuous array of bytes that is indexed from the start of +the segment by an object's reference ($SO$) divided by 16 (this +requires objects of at least 16 bytes in size). Instead of just +setting the byte to \lstinline!true! if the corresponding object was +read, we set it to a \lstinline!read_version! belonging to the +transaction, which will be incremented on each commit. Thereby, we +can avoid resetting the bytes to \lstinline!false! on commit and only +need to do this every 255 transactions. The whole code for the barrier +is easily optimizable for compilers as well as perfectly predictable +for CPUs: -\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] void stm_read(SO): *(SO >> 4) = read_version \end{lstlisting} -\inputencoding{utf8} + \subsubsection{Write Barrier} The job of the write barrier is twofold: first, it serves as a write -barrier for the garbage collector and second, it supports copy-on-write -and adds objects to the write set of the transaction. 
+barrier for the garbage collector and second, it supports +copy-on-write and adds objects to the write set of the transaction. The \textbf{fast path} of the write barrier is very simple. We only -need to check for the flag \inputencoding{latin9}\lstinline!WRITE_BARRIER!\inputencoding{utf8} -in the object's header and call the slow path if it is set. This flag -is set either if the object is old and comes from an earlier transaction, -or if there was a minor collection which will add the flag again on -all objects. It is never set on freshly allocated objects that still +need to check for the flag \lstinline!WRITE_BARRIER! in the object's +header and call the slow path if it is set. This flag is set either if +the object is old and comes from an earlier transaction, or if there +was a minor collection which will add the flag again on all +objects. It is never set on freshly allocated objects that still reside in the nursery. -\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] void stm_write(SO): if SO->flags & WRITE_BARRIER: write_slowpath(SO) \end{lstlisting} -\inputencoding{utf8} + The \textbf{slow path} is shown here: -\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] void write_slowpath(SO): // GC part: list_append(to_trace, SO) @@ -734,49 +741,49 @@ goto retry SO->flags &= ~WRITE_BARRIER \end{lstlisting} -\inputencoding{utf8} + First comes the \emph{GC part}: In any case, the object will be added to the list of objects that need tracing in the next minor collection -(\inputencoding{latin9}\lstinline!to_trace!\inputencoding{utf8}). -This is necessary in case we write a reference to it that points to -a young object. 
We then need to trace it during the next minor collection -in order to mark the young object alive and to update its reference -to the new location it gets moved to. The check for \inputencoding{latin9}\lstinline!is_overflow_obj()!\inputencoding{utf8} -tells us if the object was actually created in this transaction. In -that case, we do not need to execute the following \emph{TM part}. -We especially do not need to privatize the page since no other transaction -knows about these ``old'' objects. +(\lstinline!to_trace!). This is necessary in case we write a +reference to it that points to a young object. We then need to trace +it during the next minor collection in order to mark the young object +alive and to update its reference to the new location it gets moved +to. The check for \lstinline!is_overflow_obj()! tells us if the +object was actually created in this transaction. In that case, we do +not need to execute the following \emph{TM part}. We especially do +not need to privatize the page since no other transaction knows about +these ``old'' objects. -For TM, we first perform a read barrier on the object. We then try -to acquire its write lock. \inputencoding{latin9}\lstinline!write_locks!\inputencoding{utf8} -again is a simple global array of bytes that is indexed with the SO -of the object divided by 16. If we already own the lock, we are done. -If someone else owns the lock, we will do a write-write contention -management that will abort either us or the current owner of the object. -If we succeed in acquiring the lock using an atomic \inputencoding{latin9}\lstinline!cmp_and_swap!\inputencoding{utf8}, -we need to add the object to the write set (a simple list called \inputencoding{latin9}\lstinline!modified_old_objects!\inputencoding{utf8}) -and privatize all pages belonging to it (copy-on-write). +For TM, we first perform a read barrier on the object. We then try to +acquire its write lock. \lstinline!write_locks! 
again is a simple +global array of bytes that is indexed with the SO of the object +divided by 16. If we already own the lock, we are done. If someone +else owns the lock, we will do a write-write contention management +that will abort either us or the current owner of the object. If we +succeed in acquiring the lock using an atomic +\lstinline!cmp_and_swap!, we need to add the object to the write set +(a simple list called \lstinline!modified_old_objects!) and privatize +all pages belonging to it (copy-on-write). -In all cases, we remove the \inputencoding{latin9}\lstinline!WRITE_BARRIER!\inputencoding{utf8} -flag from the object before we return. Thus, we never trigger the -slow path again before we do the next minor collection (also part -of a commit) or we start the next transaction. +In all cases, we remove the \lstinline!WRITE_BARRIER! flag from the +object before we return. Thus, we never trigger the slow path again +before we do the next minor collection (also part of a commit) or we +start the next transaction. \subsubsection{Abort} -Aborting a transaction is rather easy. The first step is to reset -the nursery and all associated data structures. The second step is -to go over all objects in the write set (\inputencoding{latin9}\lstinline!modified_old_objects!\inputencoding{utf8}) +Aborting a transaction is rather easy. The first step is to reset the +nursery and all associated data structures. The second step is to go +over all objects in the write set (\lstinline!modified_old_objects!) and reset any modifications in our private pages by copying from the -sharing-segment. What is left is to use \inputencoding{latin9}\lstinline!longjmp()!\inputencoding{utf8} -to jump back to the location initialized by a \inputencoding{latin9}\lstinline!setjmp()!\inputencoding{utf8} -in \inputencoding{latin9}\lstinline!stm_start_transaction()!\inputencoding{utf8}. -Increasing the \inputencoding{latin9}\lstinline!read_version!\inputencoding{utf8} -is also done there. 
+sharing-segment. What is left is to use \lstinline!longjmp()! to jump +back to the location initialized by a \lstinline!setjmp()! in +\lstinline!stm_start_transaction()!. Increasing the +\lstinline!read_version! is also done there. @@ -784,23 +791,23 @@ \subsubsection{Commit} Committing a transaction needs a bit more work. First, we synchronize -all threads so that the committing one is the only one running and -all the others are waiting in a safe point. We then go through the -write set (\inputencoding{latin9}\lstinline!modified_old_objects!\inputencoding{utf8}) -and check the corresponding \inputencoding{latin9}\lstinline!read_markers!\inputencoding{utf8} -in other threads/segments. If we detect a read-write conflict, we -do contention management to either abort us or the other transaction, -or to simply wait a bit. +all threads so that the committing one is the only one running and all +the others are waiting in a safe point. We then go through the write +set (\lstinline!modified_old_objects!) and check the corresponding +\lstinline!read_markers! in other threads/segments. If we detect a +read-write conflict, we do contention management to either abort us or +the other transaction, or to simply wait a bit. After verifying that there are no conflicts anymore, we copy all our changes done to the objects in the write set to all other segments, -including the sharing-segment. This is safe since we synchronized -all threads. We also need to push overflow objects generated by minor +including the sharing-segment. This is safe since we synchronized all +threads. We also need to push overflow objects generated by minor collections to other segments, since they may reside partially in -private pages. At that point we also get a new \inputencoding{latin9}\lstinline!overflow_number!\inputencoding{utf8} -by increasing a global one, so that it stays globally unique for each -transaction. 
Increasing the \inputencoding{latin9}\lstinline!read_version!\inputencoding{utf8} -is then done at the start of a new transaction. +private pages. At that point we also get a new +\lstinline!overflow_number! by increasing a global one, so that it +stays globally unique for each transaction. Increasing the +\lstinline!read_version! is then done at the start of a new +transaction. @@ -809,16 +816,16 @@ -A requirement for performing a commit is to synchronize all threads -so that we can safely update objects in other segments. To make this +A requirement for performing a commit is to synchronize all threads so +that we can safely update objects in other segments. To make this synchronization fast and cheap, we do not want to insert an additional check regularly in order to see if synchronization is requested. We use a trick relying on the fact that dynamic languages are usually very high-level and thus allocate a lot of objects very regularly. -This is done through the function \inputencoding{latin9}\lstinline!stm_allocate!\inputencoding{utf8} -shown below: +This is done through the function \lstinline!stm_allocate! shown +below: -\inputencoding{latin9}\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] object_t *stm_allocate(ssize_t size_rounded): result = nursery_current nursery_current += size_rounded @@ -826,21 +833,22 @@ return allocate_slowpath(size_rounded) return result \end{lstlisting} -\inputencoding{utf8} + This code does simple pointer-bump allocation in the nursery. If there -is still space left in the nursery, we return \inputencoding{latin9}\lstinline!nursery_current!\inputencoding{utf8} -and bump it up by \inputencoding{latin9}\lstinline!size_rounded!\inputencoding{utf8}. 
-The interesting part is the check \inputencoding{latin9}\lstinline!nursery_current > nursery_end!\inputencoding{utf8} -which will trigger the slow path of the function to possibly perform -a minor collection in order to free up space in the nursery. +is still space left in the nursery, we return +\lstinline!nursery_current! and bump it up by +\lstinline!size_rounded!. The interesting part is the check +\lstinline!nursery_current > nursery_end! which will trigger the slow +path of the function to possibly perform a minor collection in order +to free up space in the nursery. If we want to synchronize all threads, we can rely on this check being -performed regularly. So what we do is to set the \inputencoding{latin9}\lstinline!nursery_end!\inputencoding{utf8} -to $0$ in all segments that we want to synchronize. The mentioned -check will then fail in those segments and call the slow path. In -\inputencoding{latin9}\lstinline!allocate_slowpath!\inputencoding{utf8} -they can simply check for this condition and enter a safe point. +performed regularly. So what we do is to set the +\lstinline!nursery_end! to $0$ in all segments that we want to +synchronize. The mentioned check will then fail in those segments and +call the slow path. In \lstinline!allocate_slowpath! they can simply +check for this condition and enter a safe point. For other synchronization requirements, for example: \begin{itemize}[noitemsep] @@ -857,21 +865,19 @@ the problem as well as we can. The general rules are: \begin{itemize}[noitemsep] \item prefer transactions that started earlier to younger transactions -\item to support \emph{inevitable} transactions, we always prefer them to -others since they cannot abort +\item to support \emph{inevitable} transactions, we always prefer them + to others since they cannot abort \end{itemize} We can either simply abort a transaction to let the other one succeed, or we can also wait until the other transaction committed. 
The latter -is an interesting option if we are trying to commit a write and another -transaction already read the object. We can then signal the other -transaction to commit as soon as possible and wait. After waiting, -there is now no conflict between our write and the already committed -read anymore. +is an interesting option if we are trying to commit a write and +another transaction already read the object. We can then signal the +other transaction to commit as soon as possible and wait. After +waiting, there is now no conflict between our write and the already +committed read anymore. -\textbf{} - \section{Experimental Results} From noreply at buildbot.pypy.org Tue May 6 11:24:03 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 11:24:03 +0200 (CEST) Subject: [pypy-commit] pypy default: README.rst does not relate to release version Message-ID: <20140506092403.A18521C317E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71321:8479e8d52010 Date: 2014-05-06 11:41 +0300 http://bitbucket.org/pypy/pypy/changeset/8479e8d52010/ Log: README.rst does not relate to release version diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py From noreply at buildbot.pypy.org Tue May 6 11:24:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 11:24:04 +0200 (CEST) Subject: [pypy-commit] pypy default: document handling of PYPY_IRC_TOPIC for releases Message-ID: <20140506092404.DD6CC1C317E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71322:d010fbc1841c Date: 2014-05-06 11:46 +0300 
http://bitbucket.org/pypy/pypy/changeset/d010fbc1841c/ Log: document handling of PYPY_IRC_TOPIC for releases diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,6 +28,10 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release +* merge PYPY_IRC_TOPIC environment variable handling from previous release + in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and + pypy/interpreter/app_main.py so release versions will not print a random + IRC topic by default. * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py From noreply at buildbot.pypy.org Tue May 6 11:24:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 11:24:06 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge default into branch Message-ID: <20140506092406.1F2301C317E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71323:b40b13f73bbc Date: 2014-05-06 11:47 +0300 http://bitbucket.org/pypy/pypy/changeset/b40b13f73bbc/ Log: merge default into branch diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,10 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* update README +* merge PYPY_IRC_TOPIC environment variable handling from previous release + in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and + pypy/interpreter/app_main.py so release versions will not print a random + IRC topic by default. 
* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py From noreply at buildbot.pypy.org Tue May 6 11:24:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 11:24:07 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: only print IRC topics when PYPY_IRC_TOPIC exists and is not empty Message-ID: <20140506092407.5F5351C317E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71324:3b1a6d6b096e Date: 2014-05-06 12:23 +0300 http://bitbucket.org/pypy/pypy/changeset/3b1a6d6b096e/ Log: only print IRC topics when PYPY_IRC_TOPIC exists and is not empty diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -373,6 +373,12 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. +PYPY_IRC_TOPIC ++++++++++++++++ + +Set the ``PYPY_IRC_TOPIC`` environment variable to a non-empty string +to print a random #pypy IRC topic at startup of interactive mode. + Getting involved ----------------- diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -113,6 +113,11 @@ generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. +``PYPY_IRC_TOPIC`` + If set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. + + .. include:: ../gc_info.rst :start-line: 7 diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -35,6 +35,9 @@ PYTHONPATH : %r-separated list of directories prefixed to the default module search path. The result is sys.path. PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. 
+PYPY_IRC_TOPIC: if set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. +PYPYLOG: If set to a non-empty value, enable logging. """ import sys @@ -668,7 +671,9 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - success = run_toplevel(interactive_console, mainmodule) + irc_topic = readenv and os.getenv('PYPY_IRC_TOPIC') + success = run_toplevel(interactive_console, mainmodule, + quiet=not irc_topic) except SystemExit, e: status = e.code else: From noreply at buildbot.pypy.org Tue May 6 12:29:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 12:29:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Attempt to solve a remaining source of conflicts Message-ID: <20140506102905.29E321C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71325:b5b91700900b Date: 2014-05-06 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/b5b91700900b/ Log: Attempt to solve a remaining source of conflicts diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -159,7 +159,12 @@ size_est = (oldattr._size_estimate + attr.size_estimate() - oldattr.size_estimate()) assert size_est >= (oldattr.length() * NUM_DIGITS_POW2) - oldattr._size_estimate = size_est + # This write is "stm ignored", which means that we're doing + # a best-effort attempt at updating the value, but other threads + # may or may not see the update. The benefit is that it will + # never create conflicts. 
+ with objectmodel.stm_ignored: + oldattr._size_estimate = size_est if attr.length() > obj._mapdict_storage_length(): # note that attr.size_estimate() is always at least attr.length() new_storage = [None] * attr.size_estimate() From noreply at buildbot.pypy.org Tue May 6 12:29:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 12:29:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Move the jit-less logic into a separate function, so that the codewriter doesn't see at all the stm_ignored block Message-ID: <20140506102906.6DB8A1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71326:44d4ecbdbe2e Date: 2014-05-06 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/44d4ecbdbe2e/ Log: Move the jit-less logic into a separate function, so that the codewriter doesn't see at all the stm_ignored block diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -132,6 +132,18 @@ def size_estimate(self): return self._size_estimate >> NUM_DIGITS + @jit.dont_look_inside + def _update_size_estimate(self, new_size_estimate): + size_est = (self._size_estimate + new_size_estimate + - self.size_estimate()) + assert size_est >= (self.length() * NUM_DIGITS_POW2) + # This write is "stm ignored", which means that we're doing + # a best-effort attempt at updating the value, but other threads + # may or may not see the update. The benefit is that it will + # never create conflicts. 
+ with objectmodel.stm_ignored: + self._size_estimate = size_est + def search(self, attrtype): return None @@ -156,15 +168,7 @@ attr = self._get_new_attr(selector[0], selector[1]) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): - size_est = (oldattr._size_estimate + attr.size_estimate() - - oldattr.size_estimate()) - assert size_est >= (oldattr.length() * NUM_DIGITS_POW2) - # This write is "stm ignored", which means that we're doing - # a best-effort attempt at updating the value, but other threads - # may or may not see the update. The benefit is that it will - # never create conflicts. - with objectmodel.stm_ignored: - oldattr._size_estimate = size_est + oldattr._update_size_estimate(attr.size_estimate()) if attr.length() > obj._mapdict_storage_length(): # note that attr.size_estimate() is always at least attr.length() new_storage = [None] * attr.size_estimate() From noreply at buildbot.pypy.org Tue May 6 12:29:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 12:29:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: merge heads Message-ID: <20140506102907.C99931C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71327:fa594293079d Date: 2014-05-06 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/fa594293079d/ Log: merge heads diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -822e52f17647 +84f5fbe03d5d diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -306,6 +306,12 @@ STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { + /* We should commit soon, probably. 
This is kind of a + workaround for the broken stm_should_break_transaction of + pypy that doesn't want to commit any more after a minor + collection. It may, however, always be a good idea... */ + stmcb_commit_soon(); + /* 'STM_PSEGMENT->overflow_number' is used now by this collection, in the sense that it's copied to the overflow objects */ STM_PSEGMENT->overflow_number_has_been_used = true; diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -23,6 +23,16 @@ pypy_stmcb_trace(obj, (void(*)(void*))visit); } +inline void stmcb_commit_soon() +{ + if (pypy_stm_nursery_low_fill_mark == (uintptr_t)-1) { + /* atomic */ + pypy_stm_nursery_low_fill_mark_saved = 0; + } else { + pypy_stm_nursery_low_fill_mark >>= 2; + } +} + /************************************************************/ /* "include" the stmgc.c file here */ @@ -147,7 +157,7 @@ transaction. 
*/ assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); stm_commit_transaction(); @@ -182,7 +192,7 @@ transaction whose jmpbuf points into this function */ if (pypy_stm_ready_atomic == 1) { - assert(pypy_stm_nursery_low_fill_mark != 0); + //assert(pypy_stm_nursery_low_fill_mark != 0); assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); stm_commit_transaction(); pypy_stm_nursery_low_fill_mark = 0; @@ -196,7 +206,7 @@ } /* double-check */ if (pypy_stm_ready_atomic == 1) { - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); } else { @@ -234,5 +244,3 @@ _pypy_stm_inev_state(); stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); } - -void stmcb_commit_soon(void) { /*XXX FIXME*/ } diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -73,7 +73,7 @@ case 1: pypy_stm_nursery_low_fill_mark = pypy_stm_nursery_low_fill_mark_saved; assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert((STM_SEGMENT->jmpbuf_ptr == NULL) == + assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || (pypy_stm_nursery_low_fill_mark == 0)); break; case 0: From noreply at buildbot.pypy.org Tue May 6 13:15:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 13:15:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Bah, py.path's relto() method is dangerous. It silently returns '' when Message-ID: <20140506111545.34FBD1D2A50@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71328:b79be2085a34 Date: 2014-05-06 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/b79be2085a34/ Log: Bah, py.path's relto() method is dangerous. It silently returns '' when it fails. 
Check with asserts the places where we implicitly expect to get a correct path. diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -537,6 +537,8 @@ pypkgpath = localpath.pypkgpath() if pypkgpath: relpypath = localpath.relto(pypkgpath.dirname) + assert relpypath, ("%r should be relative to %r" % + (localpath, pypkgpath.dirname)) return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -170,6 +170,9 @@ ofile = cfile.new(ext=ext) if ofile.relto(udir): return ofile + assert ofile.relto(rpythonroot), ( + "%r should be relative to either %r or %r" % ( + ofile, rpythonroot, udir)) ofile = udir.join(ofile.relto(rpythonroot)) ofile.dirpath().ensure(dir=True) return ofile diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -253,6 +253,9 @@ if fpath.dirpath() == self.makefile_dir: return fpath.basename elif fpath.dirpath().dirpath() == self.makefile_dir.dirpath(): + assert fpath.relto(self.makefile_dir.dirpath()), ( + "%r should be relative to %r" % ( + fpath, self.makefile_dir.dirpath())) path = '../' + fpath.relto(self.makefile_dir.dirpath()) return path.replace('\\', '/') else: From noreply at buildbot.pypy.org Tue May 6 13:20:16 2014 From: noreply at buildbot.pypy.org (xando) Date: Tue, 6 May 2014 13:20:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Making rpythondir more robust, this should handle rpython symlinking Message-ID: <20140506112016.5368D1D2A50@cobra.cs.uni-duesseldorf.de> Author: Sebastian Pawlu? 
Branch: Changeset: r71329:13fe2051d122 Date: 2014-05-06 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/13fe2051d122/ Log: Making rpythondir more robust, this should handle rpython symlinking diff --git a/rpython/tool/version.py b/rpython/tool/version.py --- a/rpython/tool/version.py +++ b/rpython/tool/version.py @@ -2,7 +2,7 @@ import os from subprocess import Popen, PIPE import rpython -rpythondir = os.path.dirname(os.path.abspath(rpython.__file__)) +rpythondir = os.path.dirname(os.path.realpath(rpython.__file__)) rpythonroot = os.path.dirname(rpythondir) default_retval = '?', '?' From noreply at buildbot.pypy.org Tue May 6 13:20:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 13:20:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in xando/pypy (pull request #237) Message-ID: <20140506112017.837C31D2A50@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71330:55006afd91fe Date: 2014-05-06 13:19 +0200 http://bitbucket.org/pypy/pypy/changeset/55006afd91fe/ Log: Merged in xando/pypy (pull request #237) Making rpythondir more robust,handle rpython path symlinking diff --git a/rpython/tool/version.py b/rpython/tool/version.py --- a/rpython/tool/version.py +++ b/rpython/tool/version.py @@ -2,7 +2,7 @@ import os from subprocess import Popen, PIPE import rpython -rpythondir = os.path.dirname(os.path.abspath(rpython.__file__)) +rpythondir = os.path.dirname(os.path.realpath(rpython.__file__)) rpythonroot = os.path.dirname(rpythondir) default_retval = '?', '?' 
From noreply at buildbot.pypy.org Tue May 6 13:39:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 13:39:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Don't call install_new_cell() here in case of STM Message-ID: <20140506113958.B12441C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71331:c84e52937998 Date: 2014-05-06 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/c84e52937998/ Log: Don't call install_new_cell() here in case of STM diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -1,4 +1,5 @@ from rpython.rlib.rarithmetic import r_singlefloat, r_uint +from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -154,7 +155,11 @@ def cleanup_chain(self, hash): self.reset(hash) - self.install_new_cell(hash, None) + # Next, clean up the chained list by removing the cells that + # need to be removed. For now we don't do it with STM because + # this creates pointless conflicts. 
+ if not rgc.stm_is_enabled(): + self.install_new_cell(hash, None) def install_new_cell(self, hash, newcell): index = self._get_index(hash) From noreply at buildbot.pypy.org Tue May 6 16:47:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 16:47:00 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_pypy_c.test_struct on 32bit Message-ID: <20140506144700.63B901D2A50@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71332:30f583e2d624 Date: 2014-05-06 10:45 -0400 http://bitbucket.org/pypy/pypy/changeset/30f583e2d624/ Log: fix test_pypy_c.test_struct on 32bit diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -1,6 +1,18 @@ +import sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +if sys.maxsize == 2 ** 63 - 1: + extra = """ + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + """ +else: + extra = "" + + class TestStruct(BaseTestPyPyC): def test_struct_function(self): def main(n): @@ -20,10 +32,7 @@ assert loop.match_by_id("struct", """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) + %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -41,7 +50,7 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) def test_struct_object(self): def main(n): @@ -60,10 +69,7 @@ assert loop.match_by_id('struct', """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) 
+ %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -81,4 +87,4 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) From noreply at buildbot.pypy.org Tue May 6 16:48:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 6 May 2014 16:48:33 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: fix test_pypy_c.test_struct on 32bit Message-ID: <20140506144833.DF8601D2A50@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71333:1f92ec2eff46 Date: 2014-05-06 10:45 -0400 http://bitbucket.org/pypy/pypy/changeset/1f92ec2eff46/ Log: fix test_pypy_c.test_struct on 32bit diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -1,6 +1,18 @@ +import sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +if sys.maxsize == 2 ** 63 - 1: + extra = """ + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + """ +else: + extra = "" + + class TestStruct(BaseTestPyPyC): def test_struct_function(self): def main(n): @@ -20,10 +32,7 @@ assert loop.match_by_id("struct", """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) + %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -41,7 +50,7 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) def test_struct_object(self): def main(n): @@ -60,10 +69,7 @@ assert loop.match_by_id('struct', """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) 
+ %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -81,4 +87,4 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) From noreply at buildbot.pypy.org Tue May 6 18:55:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 18:55:39 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Record a flag "did I do any allocation?" Message-ID: <20140506165539.862491D2A4E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71334:21e636ca9f67 Date: 2014-05-06 18:55 +0200 http://bitbucket.org/pypy/pypy/changeset/21e636ca9f67/ Log: Record a flag "did I do any allocation?" diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -36,7 +36,7 @@ _previous_size = -1 _op_malloc_nursery = None _v_last_malloced_nursery = None - c_zero = ConstInt(0) + does_any_allocation = False def __init__(self, gc_ll_descr, cpu): self.gc_ll_descr = gc_ll_descr @@ -89,6 +89,7 @@ # ---------- def handle_malloc_operation(self, op): + self.does_any_allocation = True opnum = op.getopnum() if opnum == rop.NEW: self.handle_new_fixedsize(op.getdescr(), op) @@ -161,6 +162,7 @@ raise NotImplementedError(op.getopname()) def gen_malloc_frame(self, frame_info, frame): + self.does_any_allocation = True descrs = self.gc_ll_descr.getframedescrs(self.cpu) if self.gc_ll_descr.kind == 'boehm': size_box = history.BoxInt() diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -26,7 +26,7 @@ return # ---------- transaction breaks ---------- if opnum == rop.STM_SHOULD_BREAK_TRANSACTION: - self.newops.append(op) + self.handle_should_break_transaction() return if opnum == rop.STM_TRANSACTION_BREAK: 
self.emitting_an_operation_that_can_collect() @@ -120,6 +120,12 @@ self.newops.append(op1) self.read_barrier_applied[v_ptr] = None + def handle_should_break_transaction(self): + op1 = ResOperation(rop.STM_SHOULD_BREAK_TRANSACTION, + [ConstInt(not self.does_any_allocation)], None) + self.newops.append(op1) + self.does_any_allocation = True + def must_apply_write_barrier(self, val, v=None): return val not in self.write_barrier_applied diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1229,3 +1229,35 @@ i3 = call_assembler(p1, descr=casmdescr) {54} guard_not_forced() [] {55} """) + + def test_stm_should_break_transaction_no_malloc(self): + self.check_rewrite(""" + [] + stm_should_break_transaction(0) + """, """ + [] + stm_should_break_transaction(1) + """) + + def test_stm_should_break_transaction_with_malloc(self): + self.check_rewrite(""" + [] + p2 = new(descr=tdescr) + stm_should_break_transaction(0) + """, """ + [] + p2 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) + stm_should_break_transaction(0) + """) + + def test_double_stm_should_break_allocation(self): + self.check_rewrite(""" + [] + stm_should_break_transaction(0) + stm_should_break_transaction(0) + """, """ + [] + stm_should_break_transaction(1) + stm_should_break_transaction(0) + """) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -207,7 +207,8 @@ if val: # app-level loop: only one of these per loop is really needed resbox = history.BoxInt(0) - mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) + mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, + [history.CONST_FALSE], resbox) self.metainterp.heapcache.stm_break_done() return resbox 
else: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -493,7 +493,7 @@ 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', - 'STM_SHOULD_BREAK_TRANSACTION/0', + 'STM_SHOULD_BREAK_TRANSACTION/1', # flag: increase nursery_current? 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway From noreply at buildbot.pypy.org Tue May 6 18:57:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 18:57:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Merge and start refactoring stm.rst Message-ID: <20140506165701.101801D2A4E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71335:b0b7c78b3182 Date: 2014-05-06 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/b0b7c78b3182/ Log: Merge and start refactoring stm.rst diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -3,144 +3,401 @@ Software Transactional Memory ============================= +.. contents:: + + +This page is about ``pypy-stm``, a special in-development version of +PyPy which can run multiple independent CPU-hungry threads in the same +process in parallel. It is a solution to what is known in the Python +world as the "global interpreter lock (GIL)" problem --- it is an +implementation of Python without the GIL. + +"STM" stands for Software `Transactional Memory`_, the technique used +internally. This page describes ``pypy-stm`` from the perspective of a +user, describes work in progress, and finally gives references to more +implementation details. + +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the `2nd call +for donation`_. + +.. 
_`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory +.. _`2nd call for donation`: http://pypy.org/tmdonate2.html + Introduction ============ -PyPy can be translated in a special mode based on Software Transactional -Memory (STM). This mode is not compatible with the JIT so far, and moreover -adds a constant run-time overhead (so far 4-5x). +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +listed below, it should be in theory within 20%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called +STM for Software Transactional Memory, which is the internal technique +used (see `Reference to implementation details`_). + The benefit is that the resulting ``pypy-stm`` can execute multiple -threads of Python code in parallel. +threads of Python code in parallel. Programs running two threads or +more in parallel should ideally run faster than in a regular PyPy +(either now, or soon as bugs are fixed). -* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use it - as a drop-in replacement and multithreaded programs will run on multiple - cores. +* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use + it as a drop-in replacement and multithreaded programs will run on + multiple cores. -* ``pypy-stm`` adds a low-level API in the ``thread`` module, namely - ``thread.atomic``, that can be used as described below. This is meant - to improve existing multithread-based programs. It is also meant to - be used to build higher-level interfaces on top of it. +* ``pypy-stm`` does not impose any special API to the user, but it + provides a new pure Python module called `transactional_memory`_ with + features to inspect the state or debug conflicts_ that prevent + parallelization. This module can also be imported on top of a non-STM + PyPy or CPython. -* A number of higher-level interfaces are planned, using internally - threads and ``thread.atomic``. 
They are meant to be used in - non-thread-based programs. Given the higher level, we also recommend - using them in new programs instead of structuring your program to use - raw threads. +* Building on top of the way the GIL is removed, we will talk + about `Atomic sections, Transactions, etc.: a better way to write + parallel programs`_. +Getting Started +=============== + +**pypy-stm requires 64-bit Linux for now.** + +Development is done in the branch `stmgc-c7`_. If you are only +interested in trying it out, you can download a Ubuntu binary here__ +(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are +release mode, but not stripped of debug symbols). The current version +supports four "segments", which means that it will run up to four +threads in parallel. + +To build a version from sources, you first need to compile a custom +version of clang(!); we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 `` +for all checkouts). Then apply all the patches in `this directory`__: +they are fixes for a clang-only feature that hasn't been used so heavily +in the past (without the patches, you get crashes of clang). Then get +the branch `stmgc-c7`_ of PyPy and run:: + + rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py + +.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ +.. __: http://clang.llvm.org/get_started.html +.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ + + +.. _caveats: + +Current status +-------------- + +* So far, small examples work fine, but there are still a few bugs. + We're busy fixing them as we find them; feel free to `report bugs`_. + +* Currently limited to 1.5 GB of RAM (this is just a parameter in + `core.h`__). Memory overflows are not correctly handled; they cause + segfaults. + +* The JIT warm-up time improved recently but is still bad. 
In order to + produce machine code, the JIT needs to enter a special single-threaded + mode for now. This means that you will get bad performance results if + your program doesn't run for several seconds, where *several* can mean + *many.* When trying benchmarks, be sure to check that you have + reached the warmed state, i.e. the performance is not improving any + more. This should be clear from the fact that as long as it's + producing more machine code, ``pypy-stm`` will run on a single core. + +* The GC is new; although clearly inspired by PyPy's regular GC, it + misses a number of optimizations for now. Programs allocating large + numbers of small objects that don't immediately die, as well as + programs that modify large lists or dicts, suffer from these missing + optimizations. + +* The GC has no support for destructors: the ``__del__`` method is never + called (including on file objects, which won't be closed for you). + This is of course temporary. Also, weakrefs might appear to work a + bit strangely for now (staying alive even though ``gc.collect()``, or + even dying but then un-dying for a short time before dying again). + +* The STM system is based on very efficient read/write barriers, which + are mostly done (their placement could be improved a bit in + JIT-generated machine code). But the overall bookkeeping logic could + see more improvements (see `Low-level statistics`_ below). + +* Forking the process is slow because the complete memory needs to be + copied manually. A warning is printed to this effect. + +* Very long-running processes (on the order of days) will eventually + crash on an assertion error because of a non-implemented overflow of + an internal 29-bit number. + +.. _`report bugs`: https://bugs.pypy.org/ +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h +.. 
__: + + + +User Guide +========== + + Drop-in replacement -=================== +------------------- Multithreaded, CPU-intensive Python programs should work unchanged on ``pypy-stm``. They will run using multiple CPU cores in parallel. -(The existing semantics of the GIL (Global Interpreter Lock) are +The existing semantics of the GIL (Global Interpreter Lock) are unchanged: although running on multiple cores in parallel, ``pypy-stm`` gives the illusion that threads are run serially, with switches only -occurring between bytecodes, not in the middle of one.) +occurring between bytecodes, not in the middle of them. Programs can +rely on this: using ``shared_list.append()/pop()`` or +``shared_dict.setdefault()`` as synchronization mechanisms continues to +work as expected. +This works by internally considering the points where a standard PyPy or +CPython would release the GIL, and replacing them with the boundaries of +"transaction". Like their database equivalent, multiple transactions +can execute in parallel, but will commit in some serial order. They +appear to behave as if they were completely run in this serialization +order. -High-level interface -==================== -Alternatively, if you have a program not using threads, but containing a -loop that runs "chunks" of work in random order:: +Atomic sections +--------------- - somedict = {...} - while len(somedict) > 0: - key, value = somedict.popitem() - do_work(key, value) # which may add more things to 'somedict' +PyPy supports *atomic sections,* which are blocks of code which you want +to execute without "releasing the GIL". *This is experimental and may +be removed in the future.* In STM terms, this means blocks of code that +are executed while guaranteeing that the transaction is not interrupted +in the middle.
-Then you can parallelize it *without using threads* by replacing this -loop with code like this:: +Here is a usage example:: - transaction.add(do_work, initialkey1, initialvalue1) - transaction.add(do_work, initialkey2, initialvalue2) + with __pypy__.thread.atomic: + assert len(lst1) == 10 + x = lst1.pop(0) + lst1.append(x) + +In this (bad) example, we are sure that the item popped off one end of +the list is appened again at the other end atomically. It means that +another thread can run ``len(lst1)`` or ``x in lst1`` without any +particular synchronization, and always see the same results, +respectively ``10`` and ``True``. It will never see the intermediate +state where ``lst1`` only contains 9 elements. Atomic sections are +similar to re-entrant locks (they can be nested), but additionally they +protect against the concurrent execution of *any* code instead of just +code that happens to be protected by the same lock in other threads. + +Note that the notion of atomic sections is very strong. If you write +code like this:: + + with __pypy__.thread.atomic: + time.sleep(10) + +then, if you think about it as if we had a GIL, you are executing a +10-seconds-long atomic transaction without releasing the GIL at all. +This prevents all other threads from progressing at all. While it is +not strictly true in ``pypy-stm``, the exact rules for when other +threads can progress or not are rather complicated; you have to consider +it likely that such a piece of code will eventually block all other +threads anyway. + +Note that if you want to experiment with ``atomic``, you may have to add +manually a transaction break just before the atomic block. This is +because the boundaries of the block are not guaranteed to be the +boundaries of the transaction: the latter is at least as big as the +block, but maybe bigger. Therefore, if you run a big atomic block, it +is a good idea to break the transaction just before. This can be done +e.g. by the hack of calling ``time.sleep(0)``. 
(This may be fixed at +some point.) + +There are also issues with the interaction of locks and atomic blocks. +This can be seen if you write to files (which have locks), including +with a ``print`` to standard output. If one thread tries to acquire a +lock while running in an atomic block, and another thread has got the +same lock, then the former may fail with a ``thread.error``. The reason +is that "waiting" for some condition to become true --while running in +an atomic block-- does not really make sense. For now you can work +around it by making sure that, say, all your prints are either in an +``atomic`` block or none of them are. (This kind of issue is +theoretically hard to solve.) + + +Locks +----- + +**Not Implemented Yet** + +The thread module's locks have their basic semantic unchanged. However, +using them (e.g. in ``with my_lock:`` blocks) starts an alternative +running mode, called `Software lock elision`_. This means that PyPy +will try to make sure that the transaction extends until the point where +the lock is released, and if it succeeds, then the acquiring and +releasing of the lock will be "elided". This means that in this case, +the whole transaction will technically not cause any write into the lock +object --- it was unacquired before, and is still unacquired after the +transaction. + +This is specially useful if two threads run ``with my_lock:`` blocks +with the same lock. If they each run a transaction that is long enough +to contain the whole block, then all writes into the lock will be elided +and the two transactions will not conflict with each other. As usual, +they will be serialized in some order: one of the two will appear to run +before the other. Simply, each of them executes an "acquire" followed +by a "release" in the same transaction. As explained above, the lock +state goes from "unacquired" to "unacquired" and can thus be left +unchanged. 
+ +This approach can gracefully fail: unlike atomic sections, there is no +guarantee that the transaction runs until the end of the block. If you +perform any input/output while you hold the lock, the transaction will +end as usual just before the input/output operation. If this occurs, +then the lock elision mode is cancelled and the lock's "acquired" state +is really written. + +Even if the lock is really acquired already, a transaction doesn't have +to wait for it to become free again. It can enter the elision-mode anyway +and tentatively execute the content of the block. It is only at the end, +when trying to commit, that the thread will pause. As soon as the real +value stored in the lock is switched back to "unacquired", it can then +proceed and attempt to commit its already-executed transaction (which +can fail and abort and restart from the scratch, as usual). + +Note that this is all *not implemented yet,* but we expect it to work +even if you acquire and release several locks. The elision-mode +transaction will extend until the first lock you acquired is released, +or until the code performs an input/output or a wait operation (for +example, waiting for another lock that is currently not free). In the +common case of acquiring several locks in nested order, they will all be +elided by the same transaction. + + +Atomic sections, Transactions, etc.: a better way to write parallel programs +---------------------------------------------------------------------------- + +(This section describes locks as we plan to implement them, but also +works with the existing atomic sections.) + +In the cases where elision works, the block of code can run in parallel +with other blocks of code *even if they are protected by the same lock.* +You still get the illusion that the blocks are run sequentially. This +works even for multiple threads that run each a series of such blocks +and nothing else, protected by one single global lock. 
This is +basically the Python application-level equivalent of what was done with +the interpreter in ``pypy-stm``: while you think you are writing +thread-unfriendly code because of this global lock, actually the +underlying system is able to make it run on multiple cores anyway. + +... + +``pypy-stm`` enables a better programming model whereby you can run +non-threaded programs on multiple cores, simply by starting multiple +threads but running each of them protected by the same lock. (Note that +"protected by the same lock" means right now "they are all protected by +``__pypy__.thread.atomic``", but this might change in the future.) + +This capability can be hidden in a library or in the framework you use; +the end user's code does not need to be explicitly aware of using +threads. For a simple example of this, there is `transaction.py`_ in +``lib_pypy``. The idea is that you write, or already have, some program +where the function ``f(key, value)`` runs on every item of some big +dictionary, say:: + + for key, value in bigdict.items(): + f(key, value) + +Then you simply replace the loop with:: + + for key, value in bigdict.items(): + transaction.add(f, key, value) transaction.run() - # and 'do_work()' may call more 'transaction.add(do_work, key, value)' -The ``transaction`` module works as if it ran each 'do_work()' call -serially in some unspecified order. Under the hood, it creates a pool -of threads. But this is not visible: each 'do_work()' is run as one -"atomic" block. Multiple atomic block can actually run in parallel, but -behave as if they were run serially. This works as long as they are -doing "generally independent" things. More details about this later. +This code runs the various calls to ``f(key, value)`` using a thread +pool, but every single call is executed under the protection of a unique +lock. The end result is that the behavior is exactly equivalent --- in +fact it makes little sense to do it in this way on a non-STM PyPy or on +CPython. 
But on ``pypy-stm``, the various locked calls to ``f(key, +value)`` can tentatively be executed in parallel, even if the observable +result is as if they were executed in some serial order. -The module is written in pure Python (`lib_pypy/transaction.py`_). -See the source code to see how it is based on the `low-level interface`_. +This approach hides the notion of threads from the end programmer, +including all the hard multithreading-related issues. This is not the +first alternative approach to explicit threads; for example, OpenMP_ is +one. However, it is one of the first ones which does not require the +code to be organized in a particular fashion. Instead, it works on any +Python program which has got latent, imperfect parallelism. Ideally, it +only requires that the end programmer identifies where this parallelism +is likely to be found, and communicates it to the system, using for +example the ``transaction.add()`` scheme. + +.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py +.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP -Low-level interface -=================== +API of transactional_memory +--------------------------- -Besides replacing the GIL with STM techniques, ``pypy-stm`` offers one -additional explicit low-level API: ``thread.atomic``. This is a context -manager to use in a ``with`` statement. Any code running in the ``with -thread.atomic`` block is guaranteed to be fully serialized with respect -to any code run by other threads (so-called *strong isolation*). +The new pure Python module ``transactional_memory`` runs on both CPython +and PyPy, both with and without STM. It contains: -Note that this is a *guarantee of observed behavior:* under the conditions -described below, a ``thread.atomic`` block can internally run in parallel -with other threads, whether they are in a ``thread.atomic`` or not. But -the behavior is as if the threads don't overlap. 
+* ``getsegmentlimit()``: return the number of "segments" in + this pypy-stm. This is the limit above which more threads will not be + able to execute on more cores. (Right now it is limited to 4 due to + inter-segment overhead, but should be increased in the future. It + should also be settable, and the default value should depend on the + number of actual CPUs.) If STM is not available, this returns 1. -Classical minimal example: in a thread, you want to pop an item from -``list1`` and append it to ``list2``, knowing that both lists can be -mutated concurrently by other threads. Using ``thread.atomic`` this can -be done without careful usage of locks on all the mutations of the lists:: +* ``print_abort_info(minimum_time=0.0)``: debugging help. Each thread + remembers the longest abort or pause it did because of cross-thread + contention_. This function prints it to ``stderr`` if the time lost + is greater than ``minimum_time`` seconds. The record is then + cleared, to make it ready for new events. This function returns + ``True`` if it printed a report, and ``False`` otherwise. - with thread.atomic: - x = list1.pop() - list2.append(x) -Note that, unlike this minimal example, the expected usage is that -``thread.atomic`` blocks are potentially very complex and long-running. -This is what you typically get with the `high-level interface`_. +API of __pypy__.thread +---------------------- +The ``__pypy__.thread`` submodule is a built-in module of PyPy that +contains a few internal built-in functions used by the +``transactional_memory`` module. -Interaction with locks -====================== +* ``__pypy__.thread.signals_enabled``: a context manager that runs its + block with signals enabled. By default, signals are only enabled in + the main thread; a non-main thread will not receive signals (this is + like CPython). 
Enabling signals in non-main threads is useful for + libraries where threads are hidden and the end user is not expecting + his code to run elsewhere than in the main thread. -Existing multi-threaded programs usually rely on locks, either directly -from ``thread.allocate_lock()`` or by using variants from the -``threading`` module. Actually, some operations in the interpreter -itself acquire locks internally too; most notably, any file access does. -These locks work fine in ``pypy-stm`` either outside ``thread.atomic`` -blocks or inside ``thread.atomic`` blocks. However, due to hard -technical issues, it is not really possible for them to work correctly -if a ``thread.atomic`` block tries to acquire a lock that has already -been acquired outside. In that situation (only), trying to acquire the -lock will raise ``thread.error``. +Conflicts +--------- -Importantly, note that this is not issue with the `high-level -interface`_, but only if you use ``thread.atomic`` directly. In the -high-level interface, the running code is either single-threaded -(outside ``transaction.run()``) or systematically running in -``thread.atomic`` blocks. +Based on Software Transactional Memory, the ``pypy-stm`` solution is +prone to "conflicts". The basic idea is that threads execute their code +speculatively, and at known points (e.g. between bytecodes) they +coordinate with each other to agree on which order their respective +actions should be "committed", i.e. become globally visible. Each +duration of time between two commit-points is called a "transaction" +(this is related to, but slightly different from, the transactions +above). -If you *are* using ``thread.atomic`` directly, then a common way for -this issue to show up is using ``print`` statements: this is due to the -internal lock on ``stdout``. You are free to use ``print`` either -outside ``thread.atomic`` blocks or inside them, but not both -concurrently. 
A way to fix this is to put all ``print`` statements -inside ``thread.atomic`` blocks, by writing this kind of code:: +A conflict occurs when there is no consistent ordering. The classical +example is if two threads both tried to change the value of the same +global variable. In that case, only one of them can be allowed to +proceed, and the other one must be either paused or aborted (restarting +the transaction). - with thread.atomic: - print "hello, the value is:", value -Note that this actually also helps ensuring that the whole line (or -lines) is printed atomically, instead of being broken up with -interleaved output from other threads. -In this case, it is always a good idea to protect ``print`` statements -with ``thread.atomic``. The reason it is not done automatically is that -it is not useful for the high-level interface, and moreover not all file -operations would benefit: if you have a read or write that may block, -putting it in a ``thread.atomic`` would have the negative effect of -suspending all other threads while we wait for the call to complete, as -described next__. + + + + + + + +Implementation +============== + + .. __: Parallelization_ @@ -252,3 +509,70 @@ .. 
include:: _ref.txt + + + +---------------++++++++++++++++++++++++-------------------- + + +with lock: + sleep(1) + + +option 1: lock.is_acquired is never touched, and all is done +atomically; from the sleep() it is also inevitable; however other +transactions can commit other "with lock" blocks as long as it goes +into the past, so progress is not hindered if the other thread never +needs inevitable; drawback = no other inevitable allowed + +option 2: lock.is_acquired=True is realized by the sleep() and the +transaction commits; then other transactions cannot commit if they +elided an acquire() until we have a real write to +lock.is_acquired=False again; in the common case we need to make the +transaction longer, to try to go until the release of the lock + + + + +Low-level statistics +-------------------- + +When a non-main thread finishes, you get low-level statistics printed to +stderr, looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + sync commit soon 1 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + longest recordered marker 0.000826 s + "File "x.py", line 5, in f" + +On each line, the first number is a counter, and the second number gives +the associated time --- the amount of real time that the thread was in +this state. The sum of all the times should be equal to the total time +between the thread's start and the thread's end. The most important +points are "run committed", which gives the amount of useful work, and +"outside transaction", which should give the time spent e.g. 
in library +calls (right now it seems to be larger than that; to investigate). The +various "run aborted" and "wait" entries are time lost due to +conflicts_. Everything else is overhead of various forms. (Short-, +medium- and long-term future work involves reducing this overhead :-) + +The last two lines are special; they are an internal marker read by +`transactional_memory.print_longest_marker`_. + +These statistics are not printed out for the main thread, for now. + From noreply at buildbot.pypy.org Tue May 6 19:03:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:03:16 +0200 (CEST) Subject: [pypy-commit] pypy default: document when the #pypy IRC topic will be displayed Message-ID: <20140506170316.D5FA31D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71336:4523d57c028e Date: 2014-05-06 19:34 +0300 http://bitbucket.org/pypy/pypy/changeset/4523d57c028e/ Log: document when the #pypy IRC topic will be displayed diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. 
include:: _ref.txt From noreply at buildbot.pypy.org Tue May 6 19:03:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:03:18 +0200 (CEST) Subject: [pypy-commit] pypy default: test for irc topic message only on release versions Message-ID: <20140506170318.367521D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71337:10affda0b314 Date: 2014-05-06 20:00 +0300 http://bitbucket.org/pypy/pypy/changeset/10affda0b314/ Log: test for irc topic message only on release versions diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,6 +7,11 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION +from lib_pypy._pypy_interact import irc_header + +is_release = PYPY_VERSION[3] == "final" + banner = sys.version.splitlines()[0] @@ -241,6 +246,10 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt + if is_release: + assert irc_header not in child.before + else: + assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') From noreply at buildbot.pypy.org Tue May 6 19:03:19 2014 From: 
noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:03:19 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: document when the #pypy IRC topic will be displayed Message-ID: <20140506170319.7886F1D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71338:5730f35707f6 Date: 2014-05-06 19:34 +0300 http://bitbucket.org/pypy/pypy/changeset/5730f35707f6/ Log: document when the #pypy IRC topic will be displayed diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. include:: _ref.txt From noreply at buildbot.pypy.org Tue May 6 19:03:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:03:20 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: test for irc topic message only on release versions Message-ID: <20140506170320.A9F621D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71339:5a1c0d473144 Date: 2014-05-06 20:00 +0300 http://bitbucket.org/pypy/pypy/changeset/5a1c0d473144/ Log: test for irc topic message only on release versions diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. 
This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,6 +7,11 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION +from lib_pypy._pypy_interact import irc_header + +is_release = PYPY_VERSION[3] == "final" + banner = sys.version.splitlines()[0] @@ -241,6 +246,10 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt + if is_release: + assert irc_header not in child.before + else: + assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') From noreply at buildbot.pypy.org Tue May 6 19:03:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:03:21 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: revert improper doc change Message-ID: <20140506170321.DDA5A1D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71340:d08113afca40 Date: 2014-05-06 20:02 +0300 http://bitbucket.org/pypy/pypy/changeset/d08113afca40/ Log: revert improper doc change diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -373,12 +373,6 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -PYPY_IRC_TOPIC -+++++++++++++++ - -Set the ``PYPY_IRC_TOPIC`` environment variable to a non-empty string -to print a random #pypy IRC topic at startup of interactive mode. 
- Getting involved ----------------- From noreply at buildbot.pypy.org Tue May 6 19:07:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:07:20 +0200 (CEST) Subject: [pypy-commit] pypy default: change release name Message-ID: <20140506170720.935FB1D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71341:8d98124a8e41 Date: 2014-05-06 20:06 +0300 http://bitbucket.org/pypy/pypy/changeset/8d98124a8e41/ Log: change release name diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,5 +1,5 @@ ======================================= -PyPy 2.3 - Easier Than Ever +PyPy 2.3 - Terrestrial Arthropod Trap ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python From noreply at buildbot.pypy.org Tue May 6 19:07:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 May 2014 19:07:21 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: change release name Message-ID: <20140506170721.ED3B81D2A4F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71342:4fe43149922a Date: 2014-05-06 20:06 +0300 http://bitbucket.org/pypy/pypy/changeset/4fe43149922a/ Log: change release name diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,5 +1,5 @@ ======================================= -PyPy 2.3 - Easier Than Ever +PyPy 2.3 - Terrestrial Arthropod Trap ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python From noreply at buildbot.pypy.org Tue May 6 23:06:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 23:06:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: Tweak Message-ID: <20140506210614.528E21C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1201:fb2bc9a3419a 
Date: 2014-05-06 21:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/fb2bc9a3419a/ Log: Tweak diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -260,6 +260,11 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + if (nursery_used > NB_NURSERY_PAGES * 4096) { + /* possible in rare cases when the program artificially advances + its own nursery_current */ + nursery_used = NB_NURSERY_PAGES * 4096; + } OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); From noreply at buildbot.pypy.org Tue May 6 23:06:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 23:06:54 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/fb2bc9a3419a Message-ID: <20140506210654.1A5831C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71343:2ec62c77cfdf Date: 2014-05-06 21:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2ec62c77cfdf/ Log: import stmgc/fb2bc9a3419a diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -84f5fbe03d5d +fb2bc9a3419a diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -261,6 +261,11 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + if (nursery_used > NB_NURSERY_PAGES * 4096) { + /* possible in rare cases when the program artificially advances + its own nursery_current */ + nursery_used = NB_NURSERY_PAGES * 4096; + } OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); From noreply at buildbot.pypy.org Tue May 6 
23:06:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 23:06:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Increment nursery_current when requested Message-ID: <20140506210655.5E2C21C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71344:26bb66aa78ab Date: 2014-05-06 21:13 +0200 http://bitbucket.org/pypy/pypy/changeset/26bb66aa78ab/ Log: Increment nursery_current when requested diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2547,7 +2547,7 @@ assert isinstance(reg, RegLoc) self.mc.MOV_rr(reg.value, ebp.value) - def _generate_cmp_break_transaction(self): + def _generate_cmp_break_transaction(self, increase_nursery=False): # emits the check with a CMP instruction: # pypy_stm_nursery_low_fill_mark < STM_SEGMENT->nursery_current # so if it is followed with a JB, it will follow the jump if @@ -2556,25 +2556,30 @@ if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark - self.mc.MOV(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) - nf_adr = rstm.adr_nursery_free - assert rx86.fits_in_32bits(nf_adr) # because it is in the 2nd page - self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) + nf_adr = rstm.adr_nursery_free # STM_SEGMENT->nursery_current + assert rx86.fits_in_32bits(nf_adr) # nf_adr is in page 1 + self.mc.MOV_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) + if increase_nursery: + self.mc.ADD_ri(X86_64_SCRATCH_REG.value, WORD) + self.mc.MOV_jr((self.SEGMENT_GC, nf_adr), X86_64_SCRATCH_REG.value) + self.mc.CMP(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) def genop_stm_should_break_transaction(self, op, arglocs, result_loc): - self._generate_cmp_break_transaction() + increase_nursery = op.args[0].getint() + self._generate_cmp_break_transaction(increase_nursery=increase_nursery) rl = 
result_loc.lowest8bits() - self.mc.SET_ir(rx86.Conditions['B'], rl.value) + self.mc.SET_ir(rx86.Conditions['A'], rl.value) self.mc.MOVZX8_rr(result_loc.value, rl.value) def genop_guard_stm_should_break_transaction(self, op, guard_op, guard_token, arglocs, result_loc): - self._generate_cmp_break_transaction() + increase_nursery = op.args[0].getint() + self._generate_cmp_break_transaction(increase_nursery=increase_nursery) if guard_op.getopnum() == rop.GUARD_FALSE: - self.implement_guard(guard_token, 'B') # JB goes to "yes, break" + self.implement_guard(guard_token, 'A') # JA goes to "yes, break" else: - self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" + self.implement_guard(guard_token, 'BE') # JBE goes to "no, don't" def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2587,9 +2592,9 @@ mc = self.mc self._generate_cmp_break_transaction() - # use JAE to jump over the following piece of code if we don't need + # use JBE to jump over the following piece of code if we don't need # to break the transaction now - mc.J_il(rx86.Conditions['AE'], 0xfffff) # patched later + mc.J_il(rx86.Conditions['BE'], 0xfffff) # patched later jae_location = mc.get_relative_pos() # This is the case in which we have to do the same as the logic From noreply at buildbot.pypy.org Tue May 6 23:06:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 23:06:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Typo Message-ID: <20140506210656.7D3191C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71345:ae24f3c96638 Date: 2014-05-06 23:06 +0200 http://bitbucket.org/pypy/pypy/changeset/ae24f3c96638/ Log: Typo diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2565,7 +2565,7 @@ self.mc.CMP(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) def 
genop_stm_should_break_transaction(self, op, arglocs, result_loc): - increase_nursery = op.args[0].getint() + increase_nursery = op.getarg(0).getint() self._generate_cmp_break_transaction(increase_nursery=increase_nursery) rl = result_loc.lowest8bits() self.mc.SET_ir(rx86.Conditions['A'], rl.value) @@ -2574,7 +2574,7 @@ def genop_guard_stm_should_break_transaction(self, op, guard_op, guard_token, arglocs, result_loc): - increase_nursery = op.args[0].getint() + increase_nursery = op.getarg(0).getint() self._generate_cmp_break_transaction(increase_nursery=increase_nursery) if guard_op.getopnum() == rop.GUARD_FALSE: self.implement_guard(guard_token, 'A') # JA goes to "yes, break" From noreply at buildbot.pypy.org Tue May 6 23:11:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 May 2014 23:11:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140506211132.723CA1C317E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71346:b04b034d97fa Date: 2014-05-06 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/b04b034d97fa/ Log: Test and fix diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -26,7 +26,7 @@ return # ---------- transaction breaks ---------- if opnum == rop.STM_SHOULD_BREAK_TRANSACTION: - self.handle_should_break_transaction() + self.handle_should_break_transaction(op) return if opnum == rop.STM_TRANSACTION_BREAK: self.emitting_an_operation_that_can_collect() @@ -120,9 +120,10 @@ self.newops.append(op1) self.read_barrier_applied[v_ptr] = None - def handle_should_break_transaction(self): + def handle_should_break_transaction(self, op): op1 = ResOperation(rop.STM_SHOULD_BREAK_TRANSACTION, - [ConstInt(not self.does_any_allocation)], None) + [ConstInt(not self.does_any_allocation)], + op.result) self.newops.append(op1) 
self.does_any_allocation = True diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1233,31 +1233,37 @@ def test_stm_should_break_transaction_no_malloc(self): self.check_rewrite(""" [] - stm_should_break_transaction(0) + i1 = stm_should_break_transaction(0) + jump(i1) """, """ [] - stm_should_break_transaction(1) + i1 = stm_should_break_transaction(1) + jump(i1) """) def test_stm_should_break_transaction_with_malloc(self): self.check_rewrite(""" [] p2 = new(descr=tdescr) - stm_should_break_transaction(0) + i1 = stm_should_break_transaction(0) + jump(i1) """, """ [] p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) - stm_should_break_transaction(0) + i1 = stm_should_break_transaction(0) + jump(i1) """) def test_double_stm_should_break_allocation(self): self.check_rewrite(""" [] - stm_should_break_transaction(0) - stm_should_break_transaction(0) + i1 = stm_should_break_transaction(0) + i2 = stm_should_break_transaction(0) + jump(i1, i2) """, """ [] - stm_should_break_transaction(1) - stm_should_break_transaction(0) + i1 = stm_should_break_transaction(1) + i2 = stm_should_break_transaction(0) + jump(i1, i2) """) From noreply at buildbot.pypy.org Wed May 7 00:00:42 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 7 May 2014 00:00:42 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: remove long double support during translation for now (it produces uncompilable code) Message-ID: <20140506220042.C92B91D2CEF@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71347:f79785955a7d Date: 2014-05-06 14:59 -0700 http://bitbucket.org/pypy/pypy/changeset/f79785955a7d/ Log: remove long double support during translation for now (it produces uncompilable code) diff --git a/pypy/module/cppyy/__init__.py 
b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -43,3 +43,9 @@ capi.verify_backend(space) # may raise ImportError space.call_method(space.wrap(self), '_init_pythonify') + + def setup_after_space_initialization(self): + """NOT_RPYTHON""" + from pypy.module.cppyy import converter, executor + converter._install_converters(self.space) + executor._install_executors(self.space) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat -from rpython.rlib import jit, jit_libffi, rfloat +from rpython.rlib import jit_libffi, objectmodel, rfloat from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -216,6 +216,9 @@ class NumericTypeConverterMixin(object): _mixin_ = True + def _wrap_object(self, space, obj): + return space.wrap(obj) + def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) x[0] = self._unwrap_object(space, w_obj) @@ -227,7 +230,7 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) rffiptr = rffi.cast(self.c_ptrtype, address) - return space.wrap(rffiptr[0]) + return self._wrap_object(space, rffiptr[0]) def to_memory(self, space, w_obj, w_value, offset): address = self._get_raw_address(space, w_obj, offset) @@ -248,7 +251,7 @@ x[0] = call_local def to_memory(self, space, w_obj, w_value, offset): - self._is_abstract(space) + raise OperationError(space.w_TypeError, space.wrap("can not assign to reference")) class IntTypeConverterMixin(NumericTypeConverterMixin): @@ -398,86 +401,30 @@ typecode = 'D' -class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): - 
_immutable_fields_ = ['default', 'typecode'] - typecode = 'Q' +if not objectmodel.we_are_translated(): + # r_longfloat isn't supported in translation + class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default', 'typecode'] + typecode = 'Q' - @jit.dont_look_inside - def __init__(self, space, default): - # TODO: loses precision - if default: - self.default = rffi.cast(self.c_type, rfloat.rstring_to_float(default)) - else: - self.default = rffi.cast(self.c_type, 0.) + def __init__(self, space, default): + # TODO: loses precision + if default: + self.default = rffi.cast(self.c_type, rfloat.rstring_to_float(default)) + else: + self.default = rffi.cast(self.c_type, 0.) - def default_argument_libffi(self, space, address): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible - raise FastCallNotPossible + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(obj)) - @jit.dont_look_inside - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = self._get_raw_address(space, w_obj, offset) - rffiptr = rffi.cast(self.c_ptrtype, address) - # TODO: this loses precision, but r_longfloat can not be wrapped - return space.wrap(float(rffiptr[0])) + class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['libffitype'] + libffitype = jit_libffi.types.pointer - # repeats to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - @jit.dont_look_inside - def convert_argument(self, space, w_obj, address, call_local): - x = rffi.cast(self.c_ptrtype, address) - x[0] = self._unwrap_object(space, w_obj) - ba = rffi.cast(rffi.CCHARP, address) - ba[capi.c_function_arg_typeoffset(space)] = self.typecode - - @jit.dont_look_inside - def convert_argument_libffi(self, space, w_obj, address, call_local): - x = rffi.cast(self.c_ptrtype, 
address) - x[0] = self._unwrap_object(space, w_obj) - - @jit.dont_look_inside - def default_argument_libffi(self, space, address): - x = rffi.cast(self.c_ptrtype, address) - x[0] = self.default - - @jit.dont_look_inside - def to_memory(self, space, w_obj, w_value, offset): - address = self._get_raw_address(space, w_obj, offset) - rffiptr = rffi.cast(self.c_ptrtype, address) - rffiptr[0] = self._unwrap_object(space, w_value) - - -class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): - _immutable_fields_ = ['libffitype'] - libffitype = jit_libffi.types.pointer - - def convert_argument_libffi(self, space, w_obj, address, call_local): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible - raise FastCallNotPossible - - def default_argument_libffi(self, space, address): - # suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible - raise FastCallNotPossible - - @jit.dont_look_inside - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = self._get_raw_address(space, w_obj, offset) - rffiptr = rffi.cast(self.c_ptrtype, address) - # TODO: this loses precision, but r_longfloat can not be wrapped - return space.wrap(float(rffiptr[0])) - - # repeatss to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - @jit.dont_look_inside - def convert_argument(self, space, w_obj, address, call_local): - x = rffi.cast(self.c_ptrtype, address) - x[0] = self._unwrap_object(space, w_obj) - ba = rffi.cast(rffi.CCHARP, address) - ba[capi.c_function_arg_typeoffset(space)] = self.typecode - - @jit.dont_look_inside - def default_argument_libffi(self, space, address): - x = rffi.cast(self.c_ptrtype, address) - x[0] = self.default + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(obj)) class CStringConverter(TypeConverter): @@ -812,32 
+759,94 @@ return VoidConverter(space, name) -_converters["bool"] = BoolConverter -_converters["char"] = CharConverter -_converters["const char&"] = ConstRefCharConverter -_converters["signed char"] = SCharConverter -_converters["const signed char&"] = ConstRefSCharConverter -_converters["float"] = FloatConverter -_converters["const float&"] = ConstFloatRefConverter -_converters["double"] = DoubleConverter -_converters["const double&"] = ConstDoubleRefConverter -_converters["long double"] = LongDoubleConverter -_converters["const long double&"] = ConstLongDoubleRefConverter -_converters["const char*"] = CStringConverter -_converters["void*"] = VoidPtrConverter -_converters["void**"] = VoidPtrPtrConverter -_converters["void*&"] = VoidPtrRefConverter +def _install_converters(space): + "NOT_RPYTHON" + global _converters + if not space.config.translating: + # r_longfloat isn't supported in translation + class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default', 'typecode'] + typecode = 'Q' -# special cases (note: 'string' aliases added below) -_converters["std::basic_string"] = StdStringConverter -_converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy -_converters["std::basic_string&"] = StdStringRefConverter + def __init__(self, space, default): + # TODO: loses precision + if default: + self.default = rffi.cast(self.c_type, rfloat.rstring_to_float(default)) + else: + self.default = rffi.cast(self.c_type, 0.) 
-_converters["PyObject*"] = PyObjectConverter + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(obj)) -_converters["#define"] = MacroConverter + class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['libffitype'] + libffitype = jit_libffi.types.pointer -# add basic (builtin) converters + def _wrap_object(self, space, obj): + # TODO: this loses precision, but r_longfloat can not be wrapped + return space.wrap(float(obj)) + + _converters["bool"] = BoolConverter + _converters["char"] = CharConverter + _converters["const char&"] = ConstRefCharConverter + _converters["signed char"] = SCharConverter + _converters["const signed char&"] = ConstRefSCharConverter + _converters["float"] = FloatConverter + _converters["const float&"] = ConstFloatRefConverter + _converters["double"] = DoubleConverter + _converters["const double&"] = ConstDoubleRefConverter + if not space.config.translating: + # r_longfloat isn't supported in translation + _converters["long double"] = LongDoubleConverter + _converters["const long double&"] = ConstLongDoubleRefConverter + _converters["const char*"] = CStringConverter + _converters["void*"] = VoidPtrConverter + _converters["void**"] = VoidPtrPtrConverter + _converters["void*&"] = VoidPtrRefConverter + + # special cases (note: 'string' aliases added below) + _converters["std::basic_string"] = StdStringConverter + _converters["const std::basic_string&"] = StdStringConverter # TODO: shouldn't copy + _converters["std::basic_string&"] = StdStringRefConverter + + _converters["PyObject*"] = PyObjectConverter + + _converters["#define"] = MacroConverter + + # add basic (builtin) converters + _build_basic_converters() + + # create the array and pointer converters; all real work is in the mixins + _build_array_converters() + + # add another set of aliased names + _add_aliased_converters() + + # ROOT-specific converters 
(TODO: this is a general use case and should grow + # an API; putting it here is done only to circumvent circular imports) + if capi.identify() == "CINT": + + class TStringConverter(InstanceConverter): + def __init__(self, space, extra): + from pypy.module.cppyy import interp_cppyy + cppclass = interp_cppyy.scope_byname(space, "TString") + InstanceConverter.__init__(self, space, cppclass) + + def _unwrap_object(self, space, w_obj): + from pypy.module.cppyy import interp_cppyy + if isinstance(w_obj, interp_cppyy.W_CPPInstance): + arg = InstanceConverter._unwrap_object(self, space, w_obj) + return capi.backend.c_TString2TString(space, arg) + else: + return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + + def free_argument(self, space, arg, call_local): + capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) + + _converters["TString"] = TStringConverter + _converters["const TString&"] = TStringConverter + def _build_basic_converters(): "NOT_RPYTHON" # signed types (use strtoll in setting of default in __init__) @@ -904,9 +913,7 @@ for name in names: _converters[name] = BasicConverter _converters["const "+name+"&"] = ConstRefConverter -_build_basic_converters() -# create the array and pointer converters; all real work is in the mixins def _build_array_converters(): "NOT_RPYTHON" array_info = ( @@ -931,9 +938,7 @@ for name in names: _a_converters[name+'[]'] = ArrayConverter _a_converters[name+'*'] = PtrConverter -_build_array_converters() -# add another set of aliased names def _add_aliased_converters(): "NOT_RPYTHON" aliases = ( @@ -950,31 +955,6 @@ ("PyObject*", "_object*"), ) - + for c_type, alias in aliases: _converters[alias] = _converters[c_type] -_add_aliased_converters() - -# ROOT-specific converters (TODO: this is a general use case and should grow -# an API; putting it here is done only to circumvent circular imports) -if capi.identify() == "CINT": - - class TStringConverter(InstanceConverter): - def 
__init__(self, space, extra): - from pypy.module.cppyy import interp_cppyy - cppclass = interp_cppyy.scope_byname(space, "TString") - InstanceConverter.__init__(self, space, cppclass) - - def _unwrap_object(self, space, w_obj): - from pypy.module.cppyy import interp_cppyy - if isinstance(w_obj, interp_cppyy.W_CPPInstance): - arg = InstanceConverter._unwrap_object(self, space, w_obj) - return capi.backend.c_TString2TString(space, arg) - else: - return capi.backend.c_charp2TString(space, space.str_w(w_obj)) - - def free_argument(self, space, arg, call_local): - capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) - - _converters["TString"] = TStringConverter - _converters["const TString&"] = TStringConverter diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -122,49 +122,6 @@ rffi.cast(self.c_ptrtype, rffi.cast(rffi.VOIDPP, result)[0])) -class LongDoubleExecutor(ffitypes.typeid(rffi.LONGDOUBLE), NumericExecutorMixin, FunctionExecutor): - # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - _immutable_ = True - c_stubcall = staticmethod(capi.c_call_ld) - - def _wrap_object(self, space, obj): - # TODO: this loses precision, but r_longfloat can not be wrapped - return space.wrap(float(obj)) - - @jit.dont_look_inside - def execute(self, space, cppmethod, cppthis, num_args, args): - result = self.c_stubcall(space, cppmethod, cppthis, num_args, args) - return self._wrap_object(space, rffi.cast(self.c_type, result)) - - def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible - raise FastCallNotPossible - -class LongDoubleRefExecutor(ffitypes.typeid(rffi.LONGDOUBLE), - NumericRefExecutorMixin, FunctionExecutor): - # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - _immutable_fields_ 
= ['libffitype'] - libffitype = jit_libffi.types.pointer - - @jit.dont_look_inside - def set_item(self, space, w_item): - self.item = self._unwrap_object(space, w_item) - self.do_assign = True - - def _wrap_object(self, space, obj): - # TODO: this loses precision, but r_longfloat can not be wrapped - return space.wrap(float(rffi.cast(self.c_type, obj))) - - @jit.dont_look_inside - def execute(self, space, cppmethod, cppthis, num_args, args): - result = capi.c_call_r(space, cppmethod, cppthis, num_args, args) - return self._wrap_reference(space, rffi.cast(self.c_ptrtype, result)) - - def execute_libffi(self, space, cif_descr, funcaddr, buffer): - from pypy.module.cppyy.interp_cppyy import FastCallNotPossible - raise FastCallNotPossible - - class CStringExecutor(FunctionExecutor): def execute(self, space, cppmethod, cppthis, num_args, args): @@ -335,25 +292,36 @@ # currently used until proper lazy instantiation available in interp_cppyy return FunctionExecutor(space, None) - -_executors["void"] = VoidExecutor -_executors["void*"] = PtrTypeExecutor -_executors["const char*"] = CStringExecutor -# special cases (note: 'string' aliases added below) -_executors["constructor"] = ConstructorExecutor +def _install_executors(space): + "NOT_RPYTHON" + global _executors + _executors["void"] = VoidExecutor + _executors["void*"] = PtrTypeExecutor + _executors["const char*"] = CStringExecutor -_executors["std::basic_string"] = StdStringExecutor -_executors["const std::basic_string&"] = StdStringRefExecutor -_executors["std::basic_string&"] = StdStringRefExecutor + # special cases (note: 'string' aliases added below) + _executors["constructor"] = ConstructorExecutor -_executors["PyObject*"] = PyObjectExecutor + _executors["std::basic_string"] = StdStringExecutor + _executors["const std::basic_string&"] = StdStringRefExecutor + _executors["std::basic_string&"] = StdStringRefExecutor -# add basic (builtin) executors -def _build_basic_executors(): + _executors["PyObject*"] = 
PyObjectExecutor + + # add basic (builtin) executors + _build_basic_executors(space) + + # add pointer type executors + _build_ptr_executors() + + # add another set of aliased names + _add_aliased_executors() + +def _build_basic_executors(space): "NOT_RPYTHON" - type_info = ( + type_info = [ (bool, capi.c_call_b, ("bool",)), (rffi.CHAR, capi.c_call_c, ("char", "unsigned char")), (rffi.SIGNEDCHAR, capi.c_call_c, ("signed char",)), @@ -366,7 +334,10 @@ (rffi.ULONGLONG, capi.c_call_ll, ("unsigned long long", "unsigned long long int")), (rffi.FLOAT, capi.c_call_f, ("float",)), (rffi.DOUBLE, capi.c_call_d, ("double",)), - ) + ] + + if not space.config.translating: + type_info.append((rffi.LONGDOUBLE, capi.c_call_ld, ("long double",))) for c_type, stub, names in type_info: class BasicExecutor(ffitypes.typeid(c_type), NumericExecutorMixin, FunctionExecutor): @@ -379,11 +350,6 @@ _executors[name] = BasicExecutor _executors[name+'&'] = BasicRefExecutor _executors['const '+name+'&'] = BasicRefExecutor # no copy needed for builtins - # exists to suppress: "[jitcodewriter:WARNING] type LongFloat is too large, ignoring graph" - _executors["long double"] = LongDoubleExecutor - _executors["long double&"] = LongDoubleRefExecutor - _executors["const long double&"] = LongDoubleRefExecutor -_build_basic_executors() # create the pointer executors; all real work is in the PtrTypeExecutor, since # all pointer types are of the same size @@ -407,9 +373,7 @@ typecode = tcode for name in names: _executors[name+'*'] = PtrExecutor -_build_ptr_executors() -# add another set of aliased names def _add_aliased_executors(): "NOT_RPYTHON" aliases = ( @@ -424,4 +388,3 @@ for c_type, alias in aliases: _executors[alias] = _executors[c_type] -_add_aliased_executors() diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -103,7 +103,12 @@ self.user_del_action = FakeUserDelAction(self) 
class dummy: pass self.config = dummy() - self.config.translating = False + self.config.translating = True + + # otherwise called through setup_after_space_initialization() + from pypy.module.cppyy import converter, executor + converter._install_converters(self) + executor._install_executors(self) def issequence_w(self, w_obj): return True From noreply at buildbot.pypy.org Wed May 7 00:25:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 00:25:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k: tweak the test runner's custom AssertionError to pass Message-ID: <20140506222529.A16CC1D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71348:920ff3e631b2 Date: 2014-05-06 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/920ff3e631b2/ Log: tweak the test runner's custom AssertionError to pass test_exc.test_doc_and_module diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -197,6 +197,9 @@ w_init = space.wrap(gateway.interp2app_temp(my_init)) w_dict = space.getattr(w_BuiltinAssertionError, space.wrap('__dict__')) w_dict = space.call_method(w_dict, 'copy') + # fixup __module__, since the new type will be is_heaptype() == True + w_dict.setitem_str('__module__', space.getattr(w_BuiltinAssertionError, + space.wrap('__module__'))) space.setitem(w_dict, space.wrap('__init__'), w_init) return space.call_function(w_metaclass, space.wrap('AssertionError'), From noreply at buildbot.pypy.org Wed May 7 00:25:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 00:25:30 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix ztranslations Message-ID: <20140506222530.D78EF1D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71349:d29bf2c80763 Date: 2014-05-06 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/d29bf2c80763/ Log: fix ztranslations diff --git 
a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -65,7 +65,7 @@ pass class W_MyType(W_MyObject): - name = u"foobar" + name = "foobar" def __init__(self): self.mro_w = [w_some_obj(), w_some_obj()] From noreply at buildbot.pypy.org Wed May 7 00:25:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 00:25:32 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140506222532.17C2C1D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71350:89063808e736 Date: 2014-05-06 15:24 -0700 http://bitbucket.org/pypy/pypy/changeset/89063808e736/ Log: 2to3 diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -158,15 +158,15 @@ # integer types names = ['short', 'ushort', 'int', 'uint', 'long', 'ulong', 'llong', 'ullong'] for i in range(len(names)): - exec 'c.m_%s = %d' % (names[i],i) + exec('c.m_%s = %d' % (names[i],i)) assert eval('c.get_%s()' % names[i]) == i for i in range(len(names)): - exec 'c.set_%s(%d)' % (names[i],2*i) + exec('c.set_%s(%d)' % (names[i],2*i)) assert eval('c.m_%s' % names[i]) == 2*i for i in range(len(names)): - exec 'c.set_%s_c(%d)' % (names[i],3*i) + exec('c.set_%s_c(%d)' % (names[i],3*i)) assert eval('c.m_%s' % names[i]) == 3*i # float types through functions @@ -191,11 +191,11 @@ atypes = ['h', 'H', 'i', 'I', 'l', 'L' ] for j in range(len(names)): b = array.array(atypes[j], a) - exec 'c.m_%s_array = b' % names[j] # buffer copies + exec('c.m_%s_array = b' % names[j]) # buffer copies for i in range(self.N): assert eval('c.m_%s_array[i]' % names[j]) == b[i] - exec 'c.m_%s_array2 = b' % names[j] # pointer copies + exec('c.m_%s_array2 = b' % names[j]) # pointer copies b[i] = 28 for i in range(self.N): assert eval('c.m_%s_array2[i]' % names[j]) == b[i] @@ -264,14 
+264,14 @@ assert c.s_int == -202 assert c.s_uint == 202 assert cppyy_test_data.s_uint == 202 - assert cppyy_test_data.s_long == -303L - assert c.s_long == -303L - assert c.s_ulong == 303L - assert cppyy_test_data.s_ulong == 303L - assert cppyy_test_data.s_llong == -404L - assert c.s_llong == -404L - assert c.s_ullong == 505L - assert cppyy_test_data.s_ullong == 505L + assert cppyy_test_data.s_long == -303 + assert c.s_long == -303 + assert c.s_ulong == 303 + assert cppyy_test_data.s_ulong == 303 + assert cppyy_test_data.s_llong == -404 + assert c.s_llong == -404 + assert c.s_ullong == 505 + assert cppyy_test_data.s_ullong == 505 # floating point types assert round(cppyy_test_data.s_float + 606., 5) == 0 @@ -321,14 +321,14 @@ assert cppyy_test_data.s_uint == 4321 raises(ValueError, setattr, c, 's_uint', -1) raises(ValueError, setattr, cppyy_test_data, 's_uint', -1) - cppyy_test_data.s_long = -87L - assert c.s_long == -87L - c.s_long = 876L - assert cppyy_test_data.s_long == 876L - cppyy_test_data.s_ulong = 876L - assert c.s_ulong == 876L - c.s_ulong = 678L - assert cppyy_test_data.s_ulong == 678L + cppyy_test_data.s_long = -87 + assert c.s_long == -87 + c.s_long = 876 + assert cppyy_test_data.s_long == 876 + cppyy_test_data.s_ulong = 876 + assert c.s_ulong == 876 + c.s_ulong = 678 + assert cppyy_test_data.s_ulong == 678 raises(ValueError, setattr, cppyy_test_data, 's_ulong', -1) raises(ValueError, setattr, c, 's_ulong', -1) diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -44,8 +44,6 @@ res = example01_class.staticAddOneToInt(1) assert res == 2 - res = example01_class.staticAddOneToInt(1L) - assert res == 2 res = example01_class.staticAddOneToInt(1, 2) assert res == 4 res = example01_class.staticAddOneToInt(-1) @@ -118,7 +116,7 @@ res = instance.addToStringValue("-12") # TODO: this leaks assert res == "30" - res = 
instance.staticAddOneToInt(1L) + res = instance.staticAddOneToInt(1) assert res == 2 instance.destruct() From noreply at buildbot.pypy.org Wed May 7 00:25:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 00:25:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: py3 hasattr goes through getattr Message-ID: <20140506222533.4FDE51D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71351:742fbfe69840 Date: 2014-05-06 15:24 -0700 http://bitbucket.org/pypy/pypy/changeset/742fbfe69840/ Log: py3 hasattr goes through getattr diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -108,9 +108,8 @@ # can not access an instance member on the class raises(ReferenceError, getattr, cppyy_test_data, 'm_bool') raises(ReferenceError, getattr, cppyy_test_data, 'm_int') - - assert not hasattr(cppyy_test_data, 'm_bool') - assert not hasattr(cppyy_test_data, 'm_int') + raises(ReferenceError, hasattr, cppyy_test_data, 'm_bool') + raises(ReferenceError, hasattr, cppyy_test_data, 'm_int') c.destruct() From noreply at buildbot.pypy.org Wed May 7 00:25:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 00:25:34 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt d083e472a6ab to py3k: treat get_cleared_operation_error as if it was no Message-ID: <20140506222534.8B94D1D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71352:09705ecd0b75 Date: 2014-05-06 15:24 -0700 http://bitbucket.org/pypy/pypy/changeset/09705ecd0b75/ Log: adapt d083e472a6ab to py3k: treat get_cleared_operation_error as if it was no exception here too diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -52,7 +52,8 @@ def setup_context(self, space): # Implicit exception chaining last_operror = 
space.getexecutioncontext().sys_exc_info() - if last_operror is None: + if (last_operror is None or + last_operror is get_cleared_operation_error(space)): return # We must normalize the value right now to check for cycles From noreply at buildbot.pypy.org Wed May 7 00:37:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 00:37:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: More fixes Message-ID: <20140506223735.D93F91D2A4E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71353:8bd1bd69b25c Date: 2014-05-07 00:36 +0200 http://bitbucket.org/pypy/pypy/changeset/8bd1bd69b25c/ Log: More fixes diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2555,14 +2555,16 @@ # if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" - psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark nf_adr = rstm.adr_nursery_free # STM_SEGMENT->nursery_current assert rx86.fits_in_32bits(nf_adr) # nf_adr is in page 1 self.mc.MOV_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) if increase_nursery: self.mc.ADD_ri(X86_64_SCRATCH_REG.value, WORD) self.mc.MOV_jr((self.SEGMENT_GC, nf_adr), X86_64_SCRATCH_REG.value) - self.mc.CMP(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) + psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark + psnlfm_adr -= stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(psnlfm_adr) # should be %fs-local + self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_TL, psnlfm_adr)) def genop_stm_should_break_transaction(self, op, arglocs, result_loc): increase_nursery = op.getarg(0).getint() From noreply at buildbot.pypy.org Wed May 7 01:57:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 01:57:30 +0200 (CEST) Subject: [pypy-commit] pypy default: customize index's error message to match cpython Message-ID: 
<20140506235730.6D5F01C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71354:3817c8108103 Date: 2014-05-06 16:56 -0700 http://bitbucket.org/pypy/pypy/changeset/3817c8108103/ Log: customize index's error message to match cpython diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -195,4 +195,5 @@ import operator assert operator.index(42) == 42 assert operator.__index__(42) == 42 - raises(TypeError, operator.index, "abc") + exc = raises(TypeError, operator.index, "abc") + assert str(exc.value) == "'str' object cannot be interpreted as an index" diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -794,13 +794,18 @@ l = ["space.isinstance_w(w_result, %s)" % x for x in checkerspec] checker = " or ".join(l) + if targetname == 'index': + msg = "'%%T' object cannot be interpreted as an index" + else: + msg = "unsupported operand type for %(targetname)s(): '%%T'" + msg = msg % locals() source = """if 1: def %(targetname)s(space, w_obj): w_impl = space.lookup(w_obj, %(specialname)r) if w_impl is None: raise oefmt(space.w_TypeError, - "unsupported operand type for %(targetname)s(): " - "'%%T'", w_obj) + %(msg)r, + w_obj) w_result = space.get_and_call_function(w_impl, w_obj) if %(checker)s: From noreply at buildbot.pypy.org Wed May 7 02:03:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 02:03:31 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140507000331.960651C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71355:f7d70d54b9d9 Date: 2014-05-06 16:58 -0700 http://bitbucket.org/pypy/pypy/changeset/f7d70d54b9d9/ Log: merge default diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- 
a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. include:: _ref.txt diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,10 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* update README +* merge PYPY_IRC_TOPIC environment variable handling from previous release + in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and + pypy/interpreter/app_main.py so release versions will not print a random + IRC topic by default. 
* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,5 +1,5 @@ ======================================= -PyPy 2.3 - Easier Than Ever +PyPy 2.3 - Terrestrial Arthropod Trap ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,6 +7,11 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION +from lib_pypy._pypy_interact import irc_header + +is_release = PYPY_VERSION[3] == "final" + python3 = os.environ.get("PYTHON3", "python3") @@ -250,6 +255,10 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt + if is_release: + assert irc_header not in child.before + else: + assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -179,6 +179,8 @@ assert operator.index(42) == 42 assert operator.__index__(42) == 42 raises(TypeError, operator.index, "abc") + exc = raises(TypeError, operator.index, "abc") + assert str(exc.value) == "'str' object cannot be interpreted as an index" def test_indexOf(self): import operator diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ 
-1,6 +1,18 @@ +import sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC +if sys.maxsize == 2 ** 63 - 1: + extra = """ + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) + i9 = int_le(i4, 2147483647) + guard_true(i9, descr=...) + """ +else: + extra = "" + + class TestStruct(BaseTestPyPyC): def test_struct_function(self): def main(n): @@ -20,10 +32,7 @@ assert loop.match_by_id("struct", """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) + %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -41,7 +50,7 @@ guard_false(i28, descr=...) i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) def test_struct_object(self): def main(n): @@ -60,10 +69,7 @@ assert loop.match_by_id('struct', """ guard_not_invalidated(descr=...) # struct.pack - i8 = int_ge(i4, -2147483648) - guard_true(i8, descr=...) - i9 = int_le(i4, 2147483647) - guard_true(i9, descr=...) + %s i11 = int_and(i4, 255) i13 = int_rshift(i4, 8) i14 = int_and(i13, 255) @@ -81,4 +87,4 @@ guard_false(i28, descr=...) 
i30 = int_lshift(i20, 24) i31 = int_or(i26, i30) - """) + """ % extra) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -632,13 +632,18 @@ l = ["space.isinstance_w(w_result, %s)" % x for x in checkerspec] checker = " or ".join(l) + if targetname == 'index': + msg = "'%%T' object cannot be interpreted as an index" + else: + msg = "unsupported operand type for %(targetname)s(): '%%T'" + msg = msg % locals() source = """if 1: def %(targetname)s(space, w_obj): w_impl = space.lookup(w_obj, %(specialname)r) if w_impl is None: raise oefmt(space.w_TypeError, - "unsupported operand type for %(targetname)s(): " - "'%%T'", w_obj) + %(msg)r, + w_obj) w_result = space.get_and_call_function(w_impl, w_obj) if %(checker)s: diff --git a/rpython/tool/version.py b/rpython/tool/version.py --- a/rpython/tool/version.py +++ b/rpython/tool/version.py @@ -2,7 +2,7 @@ import os from subprocess import Popen, PIPE import rpython -rpythondir = os.path.dirname(os.path.abspath(rpython.__file__)) +rpythondir = os.path.dirname(os.path.realpath(rpython.__file__)) rpythonroot = os.path.dirname(rpythondir) default_retval = '?', '?' 
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -537,6 +537,8 @@ pypkgpath = localpath.pypkgpath() if pypkgpath: relpypath = localpath.relto(pypkgpath.dirname) + assert relpypath, ("%r should be relative to %r" % + (localpath, pypkgpath.dirname)) return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/goal/richards.py b/rpython/translator/goal/richards.py --- a/rpython/translator/goal/richards.py +++ b/rpython/translator/goal/richards.py @@ -102,13 +102,13 @@ self.task_waiting = False self.task_holding = False return self - + def waitingWithPacket(self): self.packet_pending = True self.task_waiting = True self.task_holding = False return self - + def isPacketPending(self): return self.packet_pending @@ -154,6 +154,7 @@ self.holdCount = 0 self.qpktCount = 0 +taskWorkArea = TaskWorkArea() class Task(TaskState): @@ -235,7 +236,7 @@ if t is None: raise Exception("Bad task id %d" % id) return t - + # DeviceTask @@ -309,7 +310,7 @@ else: i.control = i.control/2 ^ 0xd008 return self.release(I_DEVB) - + # WorkTask @@ -384,7 +385,7 @@ wkq = None; DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec()); DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec()); - + schedule() if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -170,6 +170,9 @@ ofile = cfile.new(ext=ext) if ofile.relto(udir): return ofile + assert ofile.relto(rpythonroot), ( + "%r should be relative to either %r or %r" % ( + ofile, rpythonroot, udir)) ofile = udir.join(ofile.relto(rpythonroot)) ofile.dirpath().ensure(dir=True) return ofile diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py 
--- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -253,6 +253,9 @@ if fpath.dirpath() == self.makefile_dir: return fpath.basename elif fpath.dirpath().dirpath() == self.makefile_dir.dirpath(): + assert fpath.relto(self.makefile_dir.dirpath()), ( + "%r should be relative to %r" % ( + fpath, self.makefile_dir.dirpath())) path = '../' + fpath.relto(self.makefile_dir.dirpath()) return path.replace('\\', '/') else: From noreply at buildbot.pypy.org Wed May 7 02:03:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 02:03:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: switch to py3's error message Message-ID: <20140507000333.130381C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71356:98a0ff61e8d0 Date: 2014-05-06 17:01 -0700 http://bitbucket.org/pypy/pypy/changeset/98a0ff61e8d0/ Log: switch to py3's error message diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -99,7 +99,7 @@ def __int__(self): return 42 exc = raises(TypeError, bin, D()) - assert "index" in str(exc.value) + assert "integer" in str(exc.value) def test_oct(self): class Foo: diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -180,7 +180,7 @@ assert operator.__index__(42) == 42 raises(TypeError, operator.index, "abc") exc = raises(TypeError, operator.index, "abc") - assert str(exc.value) == "'str' object cannot be interpreted as an index" + assert str(exc.value) == "'str' object cannot be interpreted as an integer" def test_indexOf(self): import operator diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ 
b/pypy/objspace/descroperation.py @@ -633,7 +633,7 @@ for x in checkerspec] checker = " or ".join(l) if targetname == 'index': - msg = "'%%T' object cannot be interpreted as an index" + msg = "'%%T' object cannot be interpreted as an integer" else: msg = "unsupported operand type for %(targetname)s(): '%%T'" msg = msg % locals() From noreply at buildbot.pypy.org Wed May 7 02:03:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 02:03:34 +0200 (CEST) Subject: [pypy-commit] pypy py3k: match cpython's error message Message-ID: <20140507000334.704C61C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71357:4ce5be4dfac4 Date: 2014-05-06 17:01 -0700 http://bitbucket.org/pypy/pypy/changeset/4ce5be4dfac4/ Log: match cpython's error message diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1553,10 +1553,10 @@ if space.isinstance_w(w_source, space.w_unicode): from pypy.interpreter.unicodehelper import encode w_source = encode(space, w_source) - source = space.bytes0_w(w_source) + source = space.bytes_w(w_source) flags |= consts.PyCF_IGNORE_COOKIE elif space.isinstance_w(w_source, space.w_bytes): - source = space.bytes0_w(w_source) + source = space.bytes_w(w_source) else: try: buf = space.buffer_w(w_source, space.BUF_SIMPLE) @@ -1565,8 +1565,12 @@ raise raise oefmt(space.w_TypeError, "%s() arg 1 must be a %s object", funcname, what) - source = rstring.assert_str0(buf.as_str()) - return source, flags + source = buf.as_str() + + if '\x00' in source: + raise oefmt(space.w_TypeError, + "source code string cannot contain null bytes") + return rstring.assert_str0(source), flags def ensure_ns(space, w_globals, w_locals, funcname, caller=None): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ 
b/pypy/module/__builtin__/test/test_builtin.py @@ -490,9 +490,7 @@ co = compile(memoryview(b'1+2'), '?', 'eval') assert eval(co) == 3 exc = raises(TypeError, compile, chr(0), '?', 'eval') - assert str(exc.value) == "compile() expected string without null bytes" - exc = raises(TypeError, compile, memoryview(b'1+2'), '?', 'eval') - assert str(exc.value) == "expected a readable buffer object" + assert str(exc.value) == "source code string cannot contain null bytes" compile("from __future__ import with_statement", "", "exec") raises(SyntaxError, compile, '-', '?', 'eval') raises(SyntaxError, compile, '"\\xt"', '?', 'eval') From noreply at buildbot.pypy.org Wed May 7 02:22:16 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 7 May 2014 02:22:16 +0200 (CEST) Subject: [pypy-commit] pypy default: On Unicode wide builds (=all except win32), don't merge utf16 surrogate pairs on encoding. Message-ID: <20140507002216.549EF1C332E@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r71358:d492bd661190 Date: 2014-04-11 23:10 +0200 http://bitbucket.org/pypy/pypy/changeset/d492bd661190/ Log: On Unicode wide builds (=all except win32), don't merge utf16 surrogate pairs on encoding. This only affects python3 which sets allow_surrogates=False. 
(grafted from 5494a374d576b41509aa34faef64465f38dbd117) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -331,7 +331,8 @@ ch2 = ord(s[pos]) # Check for low surrogate and combine the two to # form a UCS4 value - if ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF: + if ((allow_surrogates or MAXUNICODE < 65536) and + ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): ch3 = ((ch - 0xD800) << 10 | (ch2 - 0xDC00)) + 0x10000 pos += 1 _encodeUCS4(result, ch3) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -803,3 +803,20 @@ u, len(u), True) == r'\ud800\udc00' assert runicode.unicode_encode_raw_unicode_escape( u, len(u), True) == r'\ud800\udc00' + + def test_encode_surrogate_pair_utf8(self): + u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00) + if runicode.MAXUNICODE < 65536: + # Narrow unicode build, consider utf16 surrogate pairs + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=False) == '\xf0\x90\x80\x80' + else: + # Wide unicode build, merge utf16 surrogate pairs only when allowed + assert runicode.unicode_encode_utf_8( + u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' + # Surrogates not merged, encoding fails. 
+ py.test.raises( + UnicodeEncodeError, runicode.unicode_encode_utf_8, + u, len(u), True, allow_surrogates=False) From noreply at buildbot.pypy.org Wed May 7 02:22:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 02:22:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k: sync w/ default Message-ID: <20140507002217.888E91C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71359:725222628d38 Date: 2014-05-06 17:21 -0700 http://bitbucket.org/pypy/pypy/changeset/725222628d38/ Log: sync w/ default diff --git a/rpython/tool/disassembler.py b/rpython/tool/disassembler.py --- a/rpython/tool/disassembler.py +++ b/rpython/tool/disassembler.py @@ -73,9 +73,9 @@ if type(x) is types.InstanceType: x = x.__class__ if hasattr(x, 'im_func'): - x = x.__func__ + x = x.im_func if hasattr(x, 'func_code'): - x = x.__code__ + x = x.func_code if hasattr(x, '__dict__'): xxx items = sorted(x.__dict__.items()) @@ -83,11 +83,11 @@ if type(x1) in (types.MethodType, types.FunctionType, types.CodeType, - type): + types.ClassType): print("Disassembly of %s:" % name) try: dis(x1) - except TypeError as msg: + except TypeError, msg: print("Sorry:", msg) print() elif hasattr(x, 'co_code'): From noreply at buildbot.pypy.org Wed May 7 02:53:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 7 May 2014 02:53:35 +0200 (CEST) Subject: [pypy-commit] pypy py3k: handle new wide build behavior per 5494a374d576 Message-ID: <20140507005335.B1D001D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71360:107418c317cd Date: 2014-05-06 17:52 -0700 http://bitbucket.org/pypy/pypy/changeset/107418c317cd/ Log: handle new wide build behavior per 5494a374d576 diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -539,14 +539,20 @@ assert str(b'+AB', 'utf-7', 'replace') == 
'\ufffd' def test_codecs_utf8(self): + import sys assert ''.encode('utf-8') == b'' assert '\u20ac'.encode('utf-8') == b'\xe2\x82\xac' - assert '\ud800\udc02'.encode('utf-8') == b'\xf0\x90\x80\x82' - assert '\ud84d\udc56'.encode('utf-8') == b'\xf0\xa3\x91\x96' raises(UnicodeEncodeError, '\ud800'.encode, 'utf-8') raises(UnicodeEncodeError, '\udc00'.encode, 'utf-8') raises(UnicodeEncodeError, '\udc00!'.encode, 'utf-8') - assert ('\ud800\udc02'*1000).encode('utf-8') == b'\xf0\x90\x80\x82'*1000 + if sys.maxunicode > 0xFFFF: + raises(UnicodeEncodeError, '\ud800\udc02'.encode, 'utf-8') + raises(UnicodeEncodeError, '\ud84d\udc56'.encode, 'utf-8') + raises(UnicodeEncodeError, ('\ud800\udc02'*1000).encode, 'utf-8') + else: + assert '\ud800\udc02'.encode('utf-8') == b'\xf0\x90\x80\x82' + assert '\ud84d\udc56'.encode('utf-8') == b'\xf0\xa3\x91\x96' + assert ('\ud800\udc02'*1000).encode('utf-8') == b'\xf0\x90\x80\x82'*1000 assert ( '\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f' '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00' From noreply at buildbot.pypy.org Wed May 7 08:27:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 08:27:38 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation on msvc after 3484aaa1e858 Message-ID: <20140507062738.39EF81C317E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71361:ad12f8418f24 Date: 2014-05-07 09:26 +0300 http://bitbucket.org/pypy/pypy/changeset/ad12f8418f24/ Log: fix translation on msvc after 3484aaa1e858 diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -753,6 +753,8 @@ def add_extra_files(eci): srcdir = py.path.local(__file__).join('..', 'src') + _MSVC = eci.platform.name == 'msvc' + files = [ srcdir / 'entrypoint.c', # ifdef PYPY_STANDALONE srcdir / 'allocator.c', # ifdef PYPY_STANDALONE @@ -769,6 +771,8 @@ ] if _CYGWIN: files.append(srcdir / 'cygwin_wait.c') + if _MSVC: 
+ files.append(srcdir / 'asm_msvc.c') return eci.merge(ExternalCompilationInfo(separate_module_files=files)) diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 11:02:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 11:02:08 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: fix translation on msvc after 3484aaa1e858 Message-ID: <20140507090208.A657A1C155F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71362:e4ba3ae84e97 Date: 2014-05-07 09:26 +0300 http://bitbucket.org/pypy/pypy/changeset/e4ba3ae84e97/ Log: fix translation on msvc after 3484aaa1e858 diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -751,6 +751,8 @@ def add_extra_files(eci): srcdir = py.path.local(__file__).join('..', 'src') + _MSVC = eci.platform.name == 'msvc' + files = [ srcdir / 'entrypoint.c', # ifdef PYPY_STANDALONE srcdir / 'allocator.c', # ifdef PYPY_STANDALONE @@ -767,6 +769,8 @@ ] if _CYGWIN: files.append(srcdir / 'cygwin_wait.c') + if _MSVC: + files.append(srcdir / 'asm_msvc.c') return eci.merge(ExternalCompilationInfo(separate_module_files=files)) diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 11:54:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 11:54:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Finish the draft Message-ID: 
<20140507095455.33BD01C066C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71363:ece1fe3fcadc Date: 2014-05-07 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ece1fe3fcadc/ Log: Finish the draft diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -129,7 +129,6 @@ .. _`report bugs`: https://bugs.pypy.org/ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h -.. __: @@ -268,11 +267,13 @@ common case of acquiring several locks in nested order, they will all be elided by the same transaction. +.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410 + Atomic sections, Transactions, etc.: a better way to write parallel programs ---------------------------------------------------------------------------- -(This section describes locks as we plan to implement them, but also +(This section is based on locks as we plan to implement them, but also works with the existing atomic sections.) In the cases where elision works, the block of code can run in parallel @@ -285,14 +286,6 @@ thread-unfriendly code because of this global lock, actually the underlying system is able to make it run on multiple cores anyway. -... - -``pypy-stm`` enables a better programming model whereby you can run -non-threaded programs on multiple cores, simply by starting multiple -threads but running each of them protected by the same lock. (Note that -"protected by the same lock" means right now "they are all protected by -``__pypy__.thread.atomic``", but this might change in the future.) - This capability can be hidden in a library or in the framework you use; the end user's code does not need to be explicitly aware of using threads. For a simple example of this, there is `transaction.py`_ in @@ -331,6 +324,8 @@ .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP +.. 
_`transactional_memory`: + API of transactional_memory --------------------------- @@ -357,7 +352,11 @@ The ``__pypy__.thread`` submodule is a built-in module of PyPy that contains a few internal built-in functions used by the -``transactional_memory`` module. +``transactional_memory`` module, plus the following: + +* ``__pypy__.thread.atomic``: a context manager to run a block in + fully atomic mode, without "releasing the GIL". (May be eventually + removed?) * ``__pypy__.thread.signals_enabled``: a context manager that runs its block with signals enabled. By default, signals are only enabled in @@ -367,171 +366,82 @@ his code to run elsewhere than in the main thread. +.. _contention: + Conflicts --------- Based on Software Transactional Memory, the ``pypy-stm`` solution is -prone to "conflicts". The basic idea is that threads execute their code +prone to "conflicts". To repeat the basic idea, threads execute their code speculatively, and at known points (e.g. between bytecodes) they coordinate with each other to agree on which order their respective actions should be "committed", i.e. become globally visible. Each -duration of time between two commit-points is called a "transaction" -(this is related to, but slightly different from, the transactions -above). +duration of time between two commit-points is called a transaction. A conflict occurs when there is no consistent ordering. The classical example is if two threads both tried to change the value of the same global variable. In that case, only one of them can be allowed to proceed, and the other one must be either paused or aborted (restarting -the transaction). - - - - - - - - - - -Implementation -============== - - - -.. __: Parallelization_ - - -Parallelization -=============== +the transaction). If this occurs too often, parallelization fails. How much actual parallelization a multithreaded program can see is a bit -subtle. 
Basically, a program not using ``thread.atomic`` or using it -for very short amounts of time will parallelize almost freely. However, -using ``thread.atomic`` for longer periods of time comes with less -obvious rules. The exact details may vary from version to version, too, -until they are a bit more stabilized. Here is an overview. +subtle. Basically, a program not using ``__pypy__.thread.atomic`` or +eliding locks, or doing so for very short amounts of time, will +parallelize almost freely (as long as it's not some artificial example +where, say, all threads try to increase the same global counter and do +nothing else). -Each thread is actually running as a sequence of "transactions", which -are separated by "transaction breaks". The execution of the whole -multithreaded program works as if all transactions were serialized. The -transactions are actually running in parallel, but this is invisible. +However, using if the program requires longer transactions, it comes +with less obvious rules. The exact details may vary from version to +version, too, until they are a bit more stabilized. Here is an +overview. -This parallelization works as long as two principles are respected. The +Parallelization works as long as two principles are respected. The first one is that the transactions must not *conflict* with each other. The most obvious sources of conflicts are threads that all increment a global shared counter, or that all store the result of their computations into the same list --- or, more subtly, that all ``pop()`` the work to do from the same list, because that is also a mutation of the list. (It is expected that some STM-aware library will eventually -be designed to help with sharing problems, like a STM-aware list or -queue.) +be designed to help with conflict problems, like a STM-aware queue.) A conflict occurs as follows: when a transaction commits (i.e. finishes successfully) it may cause other transactions that are still in progress to abort and retry. 
This is a waste of CPU time, but even in the worst case senario it is not worse than a GIL, because at least one -transaction succeeded (so we get at worst N-1 CPUs doing useless jobs -and 1 CPU doing a job that commits successfully). +transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and +1 CPU doing a job that commits successfully). -Conflicts do occur, of course, and it is -pointless to try to avoid them all. For example they can be abundant -during some warm-up phase. What is important is to keep them rare -enough in total. +Conflicts do occur, of course, and it is pointless to try to avoid them +all. For example they can be abundant during some warm-up phase. What +is important is to keep them rare enough in total. -The other principle is that of avoiding long-running so-called -"inevitable" transactions ("inevitable" is taken in the sense of "which -cannot be avoided", i.e. transactions which cannot abort any more). We -can consider that a transaction can be in three possible modes (this is -actually a slight simplification): - -* *non-atomic:* in this mode, the interpreter is free to insert - transaction breaks more or less where it wants to. This is similar to - how, in CPython, the interpreter is free to release and reacquire the - GIL where it wants to. So in non-atomic mode, transaction breaks - occur from time to time between the execution of two bytecodes, as - well as across an external system call (the previous transaction is - committed, the system call is done outside any transaction, and - finally the next transaction is started). - -* *atomic but abortable:* transactions start in this mode at the - beginning of a ``with thread.atomic`` block. In atomic mode, - transaction breaks *never* occur, making a single potentially long - transaction. This transaction can be still be aborted if a conflict - arises, and retried as usual. 
- -* *atomic and inevitable:* as soon as an atomic block does a system - call, it cannot be aborted any more, because it has visible - side-effects. So we turn the transaction "inevitable" --- more - precisely, this occurs just before doing the system call. Once the - system call is started, the transaction cannot be aborted any more: - it must "inevitably" complete. This results in the following - internal restrictions: only one transaction in the whole process can - be inevitable, and moreover no other transaction can commit before - the inevitable one. In other words, as soon as there is an inevitable - transaction, the other transactions can continue and run until the end, - but then they will be paused. - -So what you should avoid is transactions that are inevitable for a long -period of time. Doing so blocks essentially all other transactions and -gives an effect similar to the GIL again. To work around the issue, you -need to organize your code in such a way that for any ``thread.atomic`` -block that runs for a noticable amount of time, you perform no I/O at -all before you are close to reaching the end of the block. - -Similarly, you should avoid doing any *blocking* I/O in ``thread.atomic`` -blocks. They work, but because the transaction is turned inevitable -*before* the I/O is performed, they will prevent any parallel work at -all. You need to organize the code so that such operations are done -completely outside ``thread.atomic``, e.g. in a separate thread. +Another issue is that of avoiding long-running so-called "inevitable" +transactions ("inevitable" is taken in the sense of "which cannot be +avoided", i.e. transactions which cannot abort any more). Transactions +like that should only occur if you use ``__pypy__.thread.atomic``, +generally become of I/O in atomic blocks. They work, but the +transaction is turned inevitable before the I/O is performed. For all +the remaining execution time of the atomic block, they will impede +parallel work. 
The best is to organize the code so that such operations +are done completely outside ``__pypy__.thread.atomic``. (This is related to the fact that blocking I/O operations are discouraged with Twisted, and if you really need them, you should do -them on its own separate thread. One can say that the behavior within -``thread.atomic`` looks, in a way, like the opposite of the usual -effects of the GIL: if the ``with`` block is computationally intensive -it will nicely be parallelized, but if it does any long I/O then it -prevents any parallel work.) +them on their own separate thread.) + +In case of lock elision, we don't get long-running inevitable +transactions, but a different problem can occur: doing I/O cancels lock +elision, and the lock turns into a real lock, preventing other threads +from committing if they also need this lock. (More about it when lock +elision is implemented and tested.) + Implementation ============== -XXX - - -See also -======== - -See also -https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst -(section about STM). - - -.. 
include:: _ref.txt - - - ----------------++++++++++++++++++++++++-------------------- - - -with lock: - sleep(1) - - -option 1: lock.is_acquired is never touched, and all is done -atomically; from the sleep() it is also inevitable; however other -transactions can commit other "with lock" blocks as long as it goes -into the past, so progress is not hindered if the other thread never -needs inevitable; drawback = no other inevitable allowed - -option 2: lock.is_acquired=True is realized by the sleep() and the -transaction commits; then other transactions cannot commit if they -elided an acquire() until we have a real write to -lock.is_acquired=False again; in the common case we need to make the -transaction longer, to try to go until the release of the lock - - +XXX this section mostly empty for now Low-level statistics @@ -572,7 +482,46 @@ medium- and long-term future work involves reducing this overhead :-) The last two lines are special; they are an internal marker read by -`transactional_memory.print_longest_marker`_. +``transactional_memory.print_abort_info()``. These statistics are not printed out for the main thread, for now. + +Reference to implementation details +----------------------------------- + +The core of the implementation is in a separate C library called stmgc_, +in the c7_ subdirectory. Please see the `README.txt`_ for more +information. In particular, the notion of segment is discussed there. + +.. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/ +.. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/ +.. 
_`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt + +PyPy itself adds on top of it the automatic placement of read__ and write__ +barriers and of `"becomes-inevitable-now" barriers`__, the logic to +`start/stop transactions as an RPython transformation`__ and as +`supporting`__ `C code`__, and the support in the JIT (mostly as a +`transformation step on the trace`__ and generation of custom assembler +in `assembler.py`__). + +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/readbarrier.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/memory/gctransform/stmframework.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/inevitable.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/jitdriver.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.h +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py +.. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py + + + +See also +======== + +See also +https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst +(section about STM). + + +.. 
include:: _ref.txt From noreply at buildbot.pypy.org Wed May 7 12:23:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 12:23:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Move this document from the stm branch Message-ID: <20140507102323.ABDA81C340B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71364:a421f5063e35 Date: 2014-05-07 12:22 +0200 http://bitbucket.org/pypy/pypy/changeset/a421f5063e35/ Log: Move this document from the stm branch diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -1,70 +1,77 @@ -====================== -Transactional Memory -====================== + +============================= +Software Transactional Memory +============================= .. contents:: This page is about ``pypy-stm``, a special in-development version of PyPy which can run multiple independent CPU-hungry threads in the same -process in parallel. It is side-stepping what is known in the Python -world as the "global interpreter lock (GIL)" problem. +process in parallel. It is a solution to what is known in the Python +world as the "global interpreter lock (GIL)" problem --- it is an +implementation of Python without the GIL. -"STM" stands for Software Transactional Memory, the technique used +"STM" stands for Software `Transactional Memory`_, the technique used internally. This page describes ``pypy-stm`` from the perspective of a user, describes work in progress, and finally gives references to more implementation details. -This work was done mostly by Remi Meier and Armin Rigo. Thanks to all -donors for crowd-funding the work so far! Please have a look at the -`2nd call for donation`_. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the `2nd call +for donation`_. +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory .. 
_`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% slower than a +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). -What you get in exchange for this slow-down is that ``pypy-stm`` runs -any multithreaded Python program on multiple CPUs at once. Programs -running two threads or more in parallel should ideally run faster than -in a regular PyPy, either now or soon as issues are fixed. In one way, -that's all there is to it: this is a GIL-less Python, feel free to -`download and try it`__. However, the deeper idea behind the -``pypy-stm`` project is to improve what is so far the state-of-the-art -for using multiple CPUs, which for cases where separate processes don't -work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is pushing forward an approach to *hide* the threads, as -described below in `atomic sections`_. +The benefit is that the resulting ``pypy-stm`` can execute multiple +threads of Python code in parallel. Programs running two threads or +more in parallel should ideally run faster than in a regular PyPy +(either now, or soon as bugs are fixed). +* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use + it as a drop-in replacement and multithreaded programs will run on + multiple cores. -.. __: +* ``pypy-stm`` does not impose any special API to the user, but it + provides a new pure Python module called `transactional_memory`_ with + features to inspect the state or debug conflicts_ that prevent + parallelization. 
This module can also be imported on top of a non-STM + PyPy or CPython. -Current status -============== +* Building on top of the way the GIL is removed, we will talk + about `Atomic sections, Transactions, etc.: a better way to write + parallel programs`_. + + +Getting Started +=============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu 12.04 binary -here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, -but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel, -in other words it is running a thread pool up to 4 threads emulating normal -threads. +interested in trying it out, you can download a Ubuntu binary here__ +(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are +release mode, but not stripped of debug symbols). The current version +supports four "segments", which means that it will run up to four +threads in parallel. To build a version from sources, you first need to compile a custom -version of clang; we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +version of clang(!); we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 `` for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for the very extensive usage that pypy-stm does of a -clang-only feature (without them, you get crashes of clang). Then get +they are fixes for a clang-only feature that hasn't been used so heavily +in the past (without the patches, you get crashes of clang). Then get the branch `stmgc-c7`_ of PyPy and run:: rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py @@ -75,23 +82,26 @@ .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ -Caveats: +.. 
_caveats: -* So far, small examples work fine, but there are still a number of - bugs. We're busy fixing them. +Current status +-------------- + +* So far, small examples work fine, but there are still a few bugs. + We're busy fixing them as we find them; feel free to `report bugs`_. * Currently limited to 1.5 GB of RAM (this is just a parameter in - `core.h`__). Memory overflows are not detected correctly, so may - cause segmentation faults. + `core.h`__). Memory overflows are not correctly handled; they cause + segfaults. -* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, - which is "only" bad). Moreover, you should run it with a command like - ``pypy-stm --jit trace_limit=60000 args...``; the default value of - 6000 for ``trace_limit`` is currently too low (6000 should become - reasonable again as we improve). Also, in order to produce machine - code, the JIT needs to enter a special single-threaded mode for now. - This all means that you *will* get very bad performance results if - your program doesn't run for *many* seconds for now. +* The JIT warm-up time improved recently but is still bad. In order to + produce machine code, the JIT needs to enter a special single-threaded + mode for now. This means that you will get bad performance results if + your program doesn't run for several seconds, where *several* can mean + *many.* When trying benchmarks, be sure to check that you have + reached the warmed state, i.e. the performance is not improving any + more. This should be clear from the fact that as long as it's + producing more machine code, ``pypy-stm`` will run on a single core. * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large @@ -108,111 +118,197 @@ * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in JIT-generated machine code). 
But the overall bookkeeping logic could - see more improvements (see Statistics_ below). - -* You can use `atomic sections`_, but the most visible missing thing is - that you don't get reports about the "conflicts" you get. This would - be the first thing that you need in order to start using atomic - sections more extensively. Also, for now: for better results, try to - explicitly force a transaction break just before (and possibly after) - each large atomic section, with ``time.sleep(0)``. + see more improvements (see `Low-level statistics`_ below). * Forking the process is slow because the complete memory needs to be - copied manually right now. + copied manually. A warning is printed to this effect. -* Very long-running processes should eventually crash on an assertion - error because of a non-implemented overflow of an internal 29-bit - number, but this requires at the very least ten hours --- more - probably, several days or more. +* Very long-running processes (on the order of days) will eventually + crash on an assertion error because of a non-implemented overflow of + an internal 29-bit number. .. _`report bugs`: https://bugs.pypy.org/ .. 
__: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h -Statistics +User Guide ========== + -When a non-main thread finishes, you get statistics printed to stderr, -looking like that:: +Drop-in replacement +------------------- - thread 0x7f73377fe600: - outside transaction 42182 0.506 s - run current 85466 0.000 s - run committed 34262 3.178 s - run aborted write write 6982 0.083 s - run aborted write read 550 0.005 s - run aborted inevitable 388 0.010 s - run aborted other 0 0.000 s - wait free segment 0 0.000 s - wait write read 78 0.027 s - wait inevitable 887 0.490 s - wait other 0 0.000 s - bookkeeping 51418 0.606 s - minor gc 162970 1.135 s - major gc 1 0.019 s - sync pause 59173 1.738 s - spin loop 129512 0.094 s +Multithreaded, CPU-intensive Python programs should work unchanged on +``pypy-stm``. They will run using multiple CPU cores in parallel. -The first number is a counter; the second number gives the associated -time (the amount of real time that the thread was in this state; the sum -of all the times should be equal to the total time between the thread's -start and the thread's end). The most important points are "run -committed", which gives the amount of useful work, and "outside -transaction", which should give the time spent e.g. in library calls -(right now it seems to be a bit larger than that; to investigate). -Everything else is overhead of various forms. (Short-, medium- and -long-term future work involves reducing this overhead :-) +The existing semantics of the GIL (Global Interpreter Lock) are +unchanged: although running on multiple cores in parallel, ``pypy-stm`` +gives the illusion that threads are run serially, with switches only +occurring between bytecodes, not in the middle of them. Programs can +rely on this: using ``shared_list.append()/pop()`` or +``shared_dict.setdefault()`` as synchronization mecanisms continues to +work as expected. 
-These statistics are not printed out for the main thread, for now. +This works by internally considering the points where a standard PyPy or +CPython would release the GIL, and replacing them with the boundaries of +"transaction". Like their database equivalent, multiple transactions +can execute in parallel, but will commit in some serial order. They +appear to behave as if they were completely run in this serialization +order. Atomic sections -=============== +--------------- -While one of the goal of pypy-stm is to give a GIL-free but otherwise -unmodified Python, the other goal is to push for a better way to use -multithreading. For this, you (as the Python programmer) get an API -in the ``__pypy__.thread`` submodule: +PyPy supports *atomic sections,* which are blocks of code which you want +to execute without "releasing the GIL". *This is experimental and may +be removed in the future.* In STM terms, this means blocks of code that +are executed while guaranteeing that the transaction is not interrupted +in the middle. -* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in - a ``with __pypy__.thread.atomic:`` statement). It runs the whole - block of code without breaking the current transaction --- from - the point of view of a regular CPython/PyPy, this is equivalent to - saying that the GIL will not be released at all between the start and - the end of this block of code. +Here is a usage example:: -The obvious usage is to use atomic blocks in the same way as one would -use locks: to protect changes to some shared data, you do them in a -``with atomic`` block, just like you would otherwise do them in a ``with -mylock`` block after ``mylock = thread.allocate_lock()``. This allows -you not to care about acquiring the correct locks in the correct order; -it is equivalent to having only one global lock. 
This is how -transactional memory is `generally described`__: as a way to efficiently -execute such atomic blocks, running them in parallel while giving the -illusion that they run in some serial order. + with __pypy__.thread.atomic: + assert len(lst1) == 10 + x = lst1.pop(0) + lst1.append(x) -.. __: http://en.wikipedia.org/wiki/Transactional_memory +In this (bad) example, we are sure that the item popped off one end of +the list is appened again at the other end atomically. It means that +another thread can run ``len(lst1)`` or ``x in lst1`` without any +particular synchronization, and always see the same results, +respectively ``10`` and ``True``. It will never see the intermediate +state where ``lst1`` only contains 9 elements. Atomic sections are +similar to re-entrant locks (they can be nested), but additionally they +protect against the concurrent execution of *any* code instead of just +code that happens to be protected by the same lock in other threads. -However, the less obvious intended usage of atomic sections is as a -wide-ranging replacement of explicit threads. You can turn a program -that is not multi-threaded at all into a program that uses threads -internally, together with large atomic sections to keep the behavior -unchanged. This capability can be hidden in a library or in the -framework you use; the end user's code does not need to be explicitly -aware of using threads. For a simple example of this, see -`transaction.py`_ in ``lib_pypy``. The idea is that if you have a -program where the function ``f(key, value)`` runs on every item of some -big dictionary, you can replace the loop with:: +Note that the notion of atomic sections is very strong. If you write +code like this:: + + with __pypy__.thread.atomic: + time.sleep(10) + +then, if you think about it as if we had a GIL, you are executing a +10-seconds-long atomic transaction without releasing the GIL at all. +This prevents all other threads from progressing at all. 
While it is +not strictly true in ``pypy-stm``, the exact rules for when other +threads can progress or not are rather complicated; you have to consider +it likely that such a piece of code will eventually block all other +threads anyway. + +Note that if you want to experiment with ``atomic``, you may have to add +manually a transaction break just before the atomic block. This is +because the boundaries of the block are not guaranteed to be the +boundaries of the transaction: the latter is at least as big as the +block, but maybe bigger. Therefore, if you run a big atomic block, it +is a good idea to break the transaction just before. This can be done +e.g. by the hack of calling ``time.sleep(0)``. (This may be fixed at +some point.) + +There are also issues with the interaction of locks and atomic blocks. +This can be seen if you write to files (which have locks), including +with a ``print`` to standard output. If one thread tries to acquire a +lock while running in an atomic block, and another thread has got the +same lock, then the former may fail with a ``thread.error``. The reason +is that "waiting" for some condition to become true --while running in +an atomic block-- does not really make sense. For now you can work +around it by making sure that, say, all your prints are either in an +``atomic`` block or none of them are. (This kind of issue is +theoretically hard to solve.) + + +Locks +----- + +**Not Implemented Yet** + +The thread module's locks have their basic semantic unchanged. However, +using them (e.g. in ``with my_lock:`` blocks) starts an alternative +running mode, called `Software lock elision`_. This means that PyPy +will try to make sure that the transaction extends until the point where +the lock is released, and if it succeeds, then the acquiring and +releasing of the lock will be "elided". 
This means that in this case, +the whole transaction will technically not cause any write into the lock +object --- it was unacquired before, and is still unacquired after the +transaction. + +This is specially useful if two threads run ``with my_lock:`` blocks +with the same lock. If they each run a transaction that is long enough +to contain the whole block, then all writes into the lock will be elided +and the two transactions will not conflict with each other. As usual, +they will be serialized in some order: one of the two will appear to run +before the other. Simply, each of them executes an "acquire" followed +by a "release" in the same transaction. As explained above, the lock +state goes from "unacquired" to "unacquired" and can thus be left +unchanged. + +This approach can gracefully fail: unlike atomic sections, there is no +guarantee that the transaction runs until the end of the block. If you +perform any input/output while you hold the lock, the transaction will +end as usual just before the input/output operation. If this occurs, +then the lock elision mode is cancelled and the lock's "acquired" state +is really written. + +Even if the lock is really acquired already, a transaction doesn't have +to wait for it to become free again. It can enter the elision-mode anyway +and tentatively execute the content of the block. It is only at the end, +when trying to commit, that the thread will pause. As soon as the real +value stored in the lock is switched back to "unacquired", it can then +proceed and attempt to commit its already-executed transaction (which +can fail and abort and restart from the scratch, as usual). + +Note that this is all *not implemented yet,* but we expect it to work +even if you acquire and release several locks. The elision-mode +transaction will extend until the first lock you acquired is released, +or until the code performs an input/output or a wait operation (for +example, waiting for another lock that is currently not free). 
In the +common case of acquiring several locks in nested order, they will all be +elided by the same transaction. + +.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410 + + +Atomic sections, Transactions, etc.: a better way to write parallel programs +---------------------------------------------------------------------------- + +(This section is based on locks as we plan to implement them, but also +works with the existing atomic sections.) + +In the cases where elision works, the block of code can run in parallel +with other blocks of code *even if they are protected by the same lock.* +You still get the illusion that the blocks are run sequentially. This +works even for multiple threads that run each a series of such blocks +and nothing else, protected by one single global lock. This is +basically the Python application-level equivalent of what was done with +the interpreter in ``pypy-stm``: while you think you are writing +thread-unfriendly code because of this global lock, actually the +underlying system is able to make it run on multiple cores anyway. + +This capability can be hidden in a library or in the framework you use; +the end user's code does not need to be explicitly aware of using +threads. For a simple example of this, there is `transaction.py`_ in +``lib_pypy``. The idea is that you write, or already have, some program +where the function ``f(key, value)`` runs on every item of some big +dictionary, say:: + + for key, value in bigdict.items(): + f(key, value) + +Then you simply replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) transaction.run() This code runs the various calls to ``f(key, value)`` using a thread -pool, but every single call is done in an atomic section. The end -result is that the behavior should be exactly equivalent: you don't get -any extra multithreading issue. +pool, but every single call is executed under the protection of a unique +lock. 
The end result is that the behavior is exactly equivalent --- in +fact it makes little sense to do it in this way on a non-STM PyPy or on +CPython. But on ``pypy-stm``, the various locked calls to ``f(key, +value)`` can tentatively be executed in parallel, even if the observable +result is as if they were executed in some serial order. This approach hides the notion of threads from the end programmer, including all the hard multithreading-related issues. This is not the @@ -223,41 +319,176 @@ only requires that the end programmer identifies where this parallelism is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. - + .. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP -================== -Other APIs in pypy-stm: +.. _`transactional_memory`: -* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" - in this pypy-stm. This is the limit above which more threads will not - be able to execute on more cores. (Right now it is limited to 4 due - to inter-segment overhead, but should be increased in the future. It +API of transactional_memory +--------------------------- + +The new pure Python module ``transactional_memory`` runs on both CPython +and PyPy, both with and without STM. It contains: + +* ``getsegmentlimit()``: return the number of "segments" in + this pypy-stm. This is the limit above which more threads will not be + able to execute on more cores. (Right now it is limited to 4 due to + inter-segment overhead, but should be increased in the future. It should also be settable, and the default value should depend on the - number of actual CPUs.) + number of actual CPUs.) If STM is not available, this returns 1. -* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but - raises an exception if you attempt to nest it inside another - ``atomic``. 
+* ``print_abort_info(minimum_time=0.0)``: debugging help. Each thread + remembers the longest abort or pause it did because of cross-thread + contention_. This function prints it to ``stderr`` if the time lost + is greater than ``minimum_time`` seconds. The record is then + cleared, to make it ready for new events. This function returns + ``True`` if it printed a report, and ``False`` otherwise. -* ``__pypy__.thread.signals_enabled``: a context manager that runs - its block with signals enabled. By default, signals are only - enabled in the main thread; a non-main thread will not receive - signals (this is like CPython). Enabling signals in non-main threads - is useful for libraries where threads are hidden and the end user is - not expecting his code to run elsewhere than in the main thread. -Note that all of this API is (or will be) implemented in a regular PyPy -too: for example, ``with atomic`` will simply mean "don't release the -GIL" and ``getsegmentlimit()`` will return 1. +API of __pypy__.thread +---------------------- -================== +The ``__pypy__.thread`` submodule is a built-in module of PyPy that +contains a few internal built-in functions used by the +``transactional_memory`` module, plus the following: + +* ``__pypy__.thread.atomic``: a context manager to run a block in + fully atomic mode, without "releasing the GIL". (May be eventually + removed?) + +* ``__pypy__.thread.signals_enabled``: a context manager that runs its + block with signals enabled. By default, signals are only enabled in + the main thread; a non-main thread will not receive signals (this is + like CPython). Enabling signals in non-main threads is useful for + libraries where threads are hidden and the end user is not expecting + his code to run elsewhere than in the main thread. + + +.. _contention: + +Conflicts +--------- + +Based on Software Transactional Memory, the ``pypy-stm`` solution is +prone to "conflicts". 
To repeat the basic idea, threads execute their code +speculatively, and at known points (e.g. between bytecodes) they +coordinate with each other to agree on which order their respective +actions should be "committed", i.e. become globally visible. Each +duration of time between two commit-points is called a transaction. + +A conflict occurs when there is no consistent ordering. The classical +example is if two threads both tried to change the value of the same +global variable. In that case, only one of them can be allowed to +proceed, and the other one must be either paused or aborted (restarting +the transaction). If this occurs too often, parallelization fails. + +How much actual parallelization a multithreaded program can see is a bit +subtle. Basically, a program not using ``__pypy__.thread.atomic`` or +eliding locks, or doing so for very short amounts of time, will +parallelize almost freely (as long as it's not some artificial example +where, say, all threads try to increase the same global counter and do +nothing else). + +However, using if the program requires longer transactions, it comes +with less obvious rules. The exact details may vary from version to +version, too, until they are a bit more stabilized. Here is an +overview. + +Parallelization works as long as two principles are respected. The +first one is that the transactions must not *conflict* with each other. +The most obvious sources of conflicts are threads that all increment a +global shared counter, or that all store the result of their +computations into the same list --- or, more subtly, that all ``pop()`` +the work to do from the same list, because that is also a mutation of +the list. (It is expected that some STM-aware library will eventually +be designed to help with conflict problems, like a STM-aware queue.) + +A conflict occurs as follows: when a transaction commits (i.e. finishes +successfully) it may cause other transactions that are still in progress +to abort and retry. 
This is a waste of CPU time, but even in the worst +case senario it is not worse than a GIL, because at least one +transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and +1 CPU doing a job that commits successfully). + +Conflicts do occur, of course, and it is pointless to try to avoid them +all. For example they can be abundant during some warm-up phase. What +is important is to keep them rare enough in total. + +Another issue is that of avoiding long-running so-called "inevitable" +transactions ("inevitable" is taken in the sense of "which cannot be +avoided", i.e. transactions which cannot abort any more). Transactions +like that should only occur if you use ``__pypy__.thread.atomic``, +generally become of I/O in atomic blocks. They work, but the +transaction is turned inevitable before the I/O is performed. For all +the remaining execution time of the atomic block, they will impede +parallel work. The best is to organize the code so that such operations +are done completely outside ``__pypy__.thread.atomic``. + +(This is related to the fact that blocking I/O operations are +discouraged with Twisted, and if you really need them, you should do +them on their own separate thread.) + +In case of lock elision, we don't get long-running inevitable +transactions, but a different problem can occur: doing I/O cancels lock +elision, and the lock turns into a real lock, preventing other threads +from committing if they also need this lock. (More about it when lock +elision is implemented and tested.) 
+ + + +Implementation +============== + +XXX this section mostly empty for now + + +Low-level statistics +-------------------- + +When a non-main thread finishes, you get low-level statistics printed to +stderr, looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + sync commit soon 1 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + longest recordered marker 0.000826 s + "File "x.py", line 5, in f" + +On each line, the first number is a counter, and the second number gives +the associated time --- the amount of real time that the thread was in +this state. The sum of all the times should be equal to the total time +between the thread's start and the thread's end. The most important +points are "run committed", which gives the amount of useful work, and +"outside transaction", which should give the time spent e.g. in library +calls (right now it seems to be larger than that; to investigate). The +various "run aborted" and "wait" entries are time lost due to +conflicts_. Everything else is overhead of various forms. (Short-, +medium- and long-term future work involves reducing this overhead :-) + +The last two lines are special; they are an internal marker read by +``transactional_memory.print_abort_info()``. + +These statistics are not printed out for the main thread, for now. Reference to implementation details -=================================== +----------------------------------- The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more @@ -282,3 +513,15 @@ .. 
__: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py + + + +See also +======== + +See also +https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst +(section about STM). + + +.. include:: _ref.txt From noreply at buildbot.pypy.org Wed May 7 12:36:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 12:36:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Disable asmgcc on Win32 in the same way as it was done on OS/X. Message-ID: <20140507103624.0761A1C340B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71365:d899a8048351 Date: 2014-05-07 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/d899a8048351/ Log: Disable asmgcc on Win32 in the same way as it was done on OS/X. diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -17,13 +17,8 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" - ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] -elif compiler.name == 'msvc': - DEFL_ROOTFINDER_WITHJIT = "shadowstack" - ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" - ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -91,7 +86,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ROOTFINDERS, + ["n/a", "shadowstack", "asmgcc"], "shadowstack", cmdline="--gcrootfinder", requires={ @@ -372,9 +367,10 @@ # if we have specified strange inconsistent settings. 
config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X + # disallow asmgcc on OS/X and on Win32 if config.translation.gcrootfinder == "asmgcc": - assert sys.platform != "darwin" + assert sys.platform != "darwin", "'asmgcc' not supported on OS/X" + assert sys.platform != "win32", "'asmgcc' not supported on Win32" # ---------------------------------------------------------------- From noreply at buildbot.pypy.org Wed May 7 12:55:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 12:55:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: hg merge default (from d010fbc1841c, which is the last revision that was Message-ID: <20140507105540.E965E1D23F1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71366:b0339cb53372 Date: 2014-05-07 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/b0339cb53372/ Log: hg merge default (from d010fbc1841c, which is the last revision that was merged into release-2.3.x) diff too long, truncating to 2000 out of 39274 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -64,6 +64,7 @@ ^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ +^pypy/goal/.+\.lib$ ^pypy/_cache$ ^pypy/doc/statistic/.+\.html$ ^pypy/doc/statistic/.+\.eps$ diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- 
a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = types.ModuleType('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -10,6 +10,7 @@ import tempfile import unittest import argparse +import gc from StringIO import StringIO @@ -47,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + for root, dirs, files in os.walk(self.temp_dir, topdown=False): + for name in files: + os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ 
b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py --- a/lib-python/2.7/test/test_file.py +++ b/lib-python/2.7/test/test_file.py @@ -301,6 +301,7 @@ self.fail("readlines() after next() with empty buffer " "failed. Got %r, expected %r" % (line, testline)) # Reading after iteration hit EOF shouldn't hurt either + f.close() f = self.open(TESTFN, 'rb') try: for line in f: diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -162,6 +162,7 @@ # Remark: Do not perform more than one test per open file, # since that does NOT catch the readline error on Windows. 
data = 'xxx' + self.f.close() for mode in ['w', 'wb', 'a', 'ab']: for attr in ['read', 'readline', 'readlines']: self.f = open(TESTFN, mode) @@ -478,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_genericpath.py b/lib-python/2.7/test/test_genericpath.py --- a/lib-python/2.7/test/test_genericpath.py +++ b/lib-python/2.7/test/test_genericpath.py @@ -231,9 +231,14 @@ unicwd = u'\xe7w\xf0' try: fsencoding = test_support.TESTFN_ENCODING or "ascii" - unicwd.encode(fsencoding) + asciival = unicwd.encode(fsencoding) + if fsencoding == "mbcs": + # http://bugs.python.org/issue850997 + v = asciival.find('?') + if v >= 0: + raise UnicodeEncodeError(fsencoding, unicwd, v, v, asciival) except (AttributeError, UnicodeEncodeError): - # FS encoding is probably ASCII + # FS encoding is probably ASCII or windows and codepage is non-Latin1 pass else: with test_support.temp_cwd(unicwd): diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,6 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use 
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,3 +1,6 @@ +import imp +import os + try: import cpyext except ImportError: @@ -10,4 +13,13 @@ pass # obscure condition of _ctypes_test.py being imported by py.test else: import _pypy_testcapi - _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') + cfile = '_ctypes_test.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) + except ImportError: + print('could not find _ctypes_test in %s' % output_dir) + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,23 @@ import os, sys, imp -import tempfile +import tempfile, binascii + + +def get_hashed_dir(cfile): + with open(cfile,'r') as fid: + content = fid.read() + # from cffi's Verifier() + key = '\x00'.join([sys.version[:3], content]) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + 
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + if not os.path.exists(output_dir): + os.mkdir(output_dir) + return output_dir + def _get_c_extension_suffix(): for ext, mod, typ in imp.get_suffixes(): @@ -7,12 +25,13 @@ return ext -def compile_shared(csource, modulename): +def compile_shared(csource, modulename, output_dir=None): """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, and import it. """ thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() + if output_dir is None: + output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,7 +1,19 @@ +import imp +import os + try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") -else: - import _pypy_testcapi - _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') + +import _pypy_testcapi +cfile = '_testcapimodule.c' +thisdir = os.path.dirname(__file__) +output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + +try: + fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) + with fp: + imp.load_module('_testcapi', fp, filename, description) +except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ 
-42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,10 +1,12 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ .. _`lib-python/2.7/dis.py`: https://bitbucket.org/pypy/pypy/src/default/lib-python/2.7/dis.py .. _`lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/ .. _`lib_pypy/greenlet.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/greenlet.py -.. _`lib_pypy/pypy_test/`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/pypy_test/ .. _`lib_pypy/tputil.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/tputil.py .. _`lib_pypy/transaction.py`: https://bitbucket.org/pypy/pypy/src/default/lib_pypy/transaction.py .. _`pypy/bin/`: https://bitbucket.org/pypy/pypy/src/default/pypy/bin/ @@ -36,7 +38,6 @@ .. _`pypy/interpreter/gateway.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/gateway.py .. _`pypy/interpreter/mixedmodule.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/mixedmodule.py .. 
_`pypy/interpreter/module.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/module.py -.. _`pypy/interpreter/nestedscope.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/nestedscope.py .. _`pypy/interpreter/pyframe.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyframe.py .. _`pypy/interpreter/pyopcode.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/interpreter/pyopcode.py .. _`pypy/interpreter/pyparser`: @@ -50,21 +51,21 @@ .. _`pypy/module`: .. _`pypy/module/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/ .. _`pypy/module/__builtin__/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/__init__.py +.. _`pypy/module/cppyy/capi/__init__.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/__init__.py +.. _`pypy/module/cppyy/capi/builtin_capi.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/capi/builtin_capi.py +.. _`pypy/module/cppyy/include/capi.h`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/include/capi.h +.. _`pypy/module/test_lib_pypy/`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/test_lib_pypy/ .. _`pypy/objspace/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/ -.. _`pypy/objspace/flow/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/flow/ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ -.. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py +.. _`pypy/objspace/std/bytesobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/bytesobject.py .. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. 
_`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py -.. _`pypy/objspace/std/stringtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/stringtype.py +.. _`pypy/objspace/std/strbufobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/strbufobject.py .. _`pypy/objspace/std/transparent.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/transparent.py -.. _`pypy/objspace/std/tupleobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupleobject.py -.. _`pypy/objspace/std/tupletype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/tupletype.py .. _`pypy/tool/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/ -.. _`pypy/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/algo/ .. _`pypy/tool/pytest/`: https://bitbucket.org/pypy/pypy/src/default/pypy/tool/pytest/ .. _`rpython/annotator`: .. _`rpython/annotator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/annotator/ @@ -76,6 +77,11 @@ .. _`rpython/config/translationoption.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/config/translationoption.py .. _`rpython/flowspace/`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/ .. _`rpython/flowspace/model.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/flowspace/model.py +.. _`rpython/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/ +.. _`rpython/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/generation.py +.. _`rpython/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/hybrid.py +.. _`rpython/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/minimarkpage.py +.. 
_`rpython/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/memory/gc/semispace.py .. _`rpython/rlib`: .. _`rpython/rlib/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/ .. _`rpython/rlib/listsort.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rlib/listsort.py @@ -94,16 +100,12 @@ .. _`rpython/rtyper/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ .. _`rpython/rtyper/lltypesystem/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/ .. _`rpython/rtyper/lltypesystem/lltype.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/lltype.py -.. _`rpython/rtyper/memory/`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/ -.. _`rpython/rtyper/memory/gc/generation.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/generation.py -.. _`rpython/rtyper/memory/gc/hybrid.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/hybrid.py -.. _`rpython/rtyper/memory/gc/minimarkpage.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/minimarkpage.py -.. _`rpython/rtyper/memory/gc/semispace.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/memory/gc/semispace.py .. _`rpython/rtyper/rint.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rint.py .. _`rpython/rtyper/rlist.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rlist.py .. _`rpython/rtyper/rmodel.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rmodel.py .. _`rpython/rtyper/rtyper.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/rtyper.py .. _`rpython/rtyper/test/test_llinterp.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/test/test_llinterp.py +.. _`rpython/tool/algo/`: https://bitbucket.org/pypy/pypy/src/default/rpython/tool/algo/ .. _`rpython/translator`: .. _`rpython/translator/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/ .. 
_`rpython/translator/backendopt/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/backendopt/ diff --git a/pypy/doc/cleanup.rst b/pypy/doc/cleanup.rst --- a/pypy/doc/cleanup.rst +++ b/pypy/doc/cleanup.rst @@ -9,9 +9,3 @@ distribution.rst - dot-net.rst - - - - - diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -742,9 +742,9 @@ Testing modules in ``lib_pypy/`` -------------------------------- -You can go to the `lib_pypy/pypy_test/`_ directory and invoke the testing tool +You can go to the `pypy/module/test_lib_pypy/`_ directory and invoke the testing tool ("py.test" or "python ../../pypy/test_all.py") to run tests against the -lib_pypy hierarchy. Note, that tests in `lib_pypy/pypy_test/`_ are allowed +lib_pypy hierarchy. Note, that tests in `pypy/module/test_lib_pypy/`_ are allowed and encouraged to let their tests run at interpreter level although `lib_pypy/`_ modules eventually live at PyPy's application level. This allows us to quickly test our python-coded reimplementations @@ -835,15 +835,6 @@ web interface. .. _`development tracker`: https://bugs.pypy.org/ - -use your codespeak login or register ------------------------------------- - -If you have an existing codespeak account, you can use it to login within the -tracker. Else, you can `register with the tracker`_ easily. - - -.. _`register with the tracker`: https://bugs.pypy.org/user?@template=register .. _`roundup`: http://roundup.sourceforge.net/ diff --git a/pypy/doc/config/objspace.usemodules.oracle.txt b/pypy/doc/config/objspace.usemodules.oracle.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.oracle.txt +++ /dev/null @@ -1,2 +0,0 @@ -Use the 'oracle' module. -This module is off by default, requires oracle client installed. 
diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst --- a/pypy/doc/config/opt.rst +++ b/pypy/doc/config/opt.rst @@ -46,5 +46,5 @@ The default level is `2`. -.. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ +.. _`Boehm-Demers-Weiser garbage collector`: http://hboehm.info/gc/ .. _`custom garbage collectors`: ../garbage_collection.html diff --git a/pypy/doc/config/translation.backendopt.txt b/pypy/doc/config/translation.backendopt.txt --- a/pypy/doc/config/translation.backendopt.txt +++ b/pypy/doc/config/translation.backendopt.txt @@ -1,5 +1,5 @@ This group contains options about various backend optimization passes. Most of them are described in the `EU report about optimization`_ -.. _`EU report about optimization`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. _`EU report about optimization`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -15,21 +15,21 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer - Matti Picus Hakan Ardo Benjamin Peterson - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns - Manuel Jacob Eric van Riet Paap Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen - Ronan Lamy Lukas Diekmann Sven Hager Anders Lehmann @@ -38,23 +38,23 @@ Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak - Romain Guillebert Guido Wesdorp Lawrence Oluyede - Remi Meier Bartosz Skowron Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen 
Jason Creighton Alex Martelli @@ -71,6 +71,7 @@ Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas @@ -87,6 +88,7 @@ Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin Stefano Rivera @@ -95,13 +97,17 @@ Georg Brandl Bert Freudenberg Stian Andreassen + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller - Laurence Tratt + Jeremy Thurgood + Gregor Wegberg Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -110,9 +116,7 @@ David Ripton Dusty Phillips Lukas Renggli - Edd Barrett Guenter Jantzen - Tobias Oberstein Ned Batchelder Amit Regmi Ben Young @@ -123,7 +127,6 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -134,7 +137,6 @@ Olivier Dormond Jared Grubb Karl Bartel - Tobias Pape Brian Dorsey Victor Stinner Andrews Medina @@ -146,9 +148,9 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen Jonathan David Riehl Stanislaw Halik @@ -161,7 +163,9 @@ Alexander Sedov Corbin Simpson Christopher Pope + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan Alexis Daboville @@ -170,6 +174,7 @@ Karl Ramm Pieter Zieschang Gabriel + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -180,6 +185,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -200,7 +206,6 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -212,28 +217,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller 
Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -241,6 +253,7 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths Mike Bayer Flavio Percoco Kristoffer Kleine diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -560,6 +560,12 @@ Fixing these bootstrap problems is on the TODO list. The global namespace is ``cppyy.gbl``. +* **NULL**: Is represented as ``cppyy.gbl.nullptr``. + In C++11, the keyword ``nullptr`` is used to represent ``NULL``. + For clarity of intent, it is recommended to use this instead of ``None`` + (or the integer ``0``, which can serve in some cases), as ``None`` is better + understood as ``void`` in C++. + * **operator conversions**: If defined in the C++ class and a python equivalent exists (i.e. all builtin integer and floating point types, as well as ``bool``), it will map onto that python conversion. @@ -577,7 +583,7 @@ Special care needs to be taken for global operator overloads in C++: first, make sure that they are actually reflected, especially for the global overloads for ``operator==`` and ``operator!=`` of STL vector iterators in - the case of gcc (note that they are not needed to iterator over a vector). + the case of gcc (note that they are not needed to iterate over a vector). Second, make sure that reflection info is loaded in the proper order. I.e. that these global overloads are available before use. diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst --- a/pypy/doc/cppyy_backend.rst +++ b/pypy/doc/cppyy_backend.rst @@ -51,3 +51,6 @@ to ``PATH``). In case of the former, include files are expected under ``$ROOTSYS/include`` and libraries under ``$ROOTSYS/lib``. + + +.. 
include:: _ref.txt diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -58,7 +58,6 @@ math mmap operator - oracle parser posix pyexpat @@ -106,23 +105,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. 
The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less @@ -315,6 +334,15 @@ implementation detail that shows up because of internal C-level slots that PyPy does not have. +* on CPython, ``[].__add__`` is a ``method-wrapper``, and + ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal + bound or unbound method objects. This can occasionally confuse some + tools that inspect built-in types. For example, the standard + library ``inspect`` module has a function ``ismethod()`` that returns + True on unbound method objects but False on method-wrappers or slot + wrappers. On PyPy we can't tell the difference, so + ``ismethod([].__add__) == ismethod(list.__add__) == True``. + * the ``__dict__`` attribute of new-style classes returns a normal dict, as opposed to a dict proxy like in CPython. Mutating the dict will change the type and vice versa. 
For builtin types, a dictionary will be returned that diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -47,7 +47,7 @@ `pypy/tool/`_ various utilities and hacks used from various places -`pypy/tool/algo/`_ general-purpose algorithmic and mathematic +`rpython/tool/algo/`_ general-purpose algorithmic and mathematic tools `pypy/tool/pytest/`_ support code for our `testing methods`_ @@ -129,3 +129,4 @@ .. _Mono: http://www.mono-project.com/ .. _`"standard library"`: rlib.html .. _`graph viewer`: getting-started-dev.html#try-out-the-translator +.. include:: _ref.txt diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -11,7 +11,5 @@ discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst - discussion/outline-external-ootype.rst - discussion/VM-integration.rst diff --git a/pypy/doc/distribution.rst b/pypy/doc/distribution.rst --- a/pypy/doc/distribution.rst +++ b/pypy/doc/distribution.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - ============================= lib_pypy/distributed features ============================= diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -51,6 +51,8 @@ .. function:: int pypy_execute_source_ptr(char* source, void* ptr); + .. note:: Not available in PyPy <= 2.2.1 + Just like the above, except it registers a magic argument in the source scope as ``c_argument``, where ``void*`` is encoded as Python int. @@ -100,9 +102,29 @@ Worked! +.. note:: If the compilation fails because of missing PyPy.h header file, + you are running PyPy <= 2.2.1, please see the section `Missing PyPy.h`_. + +Missing PyPy.h +-------------- + +.. note:: PyPy.h is in the nightly builds and goes to new PyPy releases (>2.2.1). 
+ +For PyPy <= 2.2.1, you can download PyPy.h from PyPy repository (it has been added in commit c4cd6ec): + +.. code-block:: bash + + cd /opt/pypy/include + wget https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h + + More advanced example --------------------- +.. note:: This example depends on pypy_execute_source_ptr which is not available + in PyPy <= 2.2.1. You might want to see the alternative example + below. + Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy @@ -161,6 +183,97 @@ is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` and fill the structure from Python side for the future use. +Alternative example +------------------- + +As ``pypy_execute_source_ptr`` is not available in PyPy 2.2.1, you might want to try +an alternative approach which relies on -export-dynamic flag to the GNU linker. +The downside to this approach is that it is platform dependent. + +.. 
code-block:: c + + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + lib = ffi.verify('int callback(int (*func)(int));')\n\ + lib.callback(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source(source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + + +Make sure to pass -export-dynamic flag when compiling:: + + $ gcc -g -o x x.c -lpypy-c -L. -export-dynamic + $ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + +Finding pypy_home +----------------- + +Function pypy_setup_home takes one parameter - the path to libpypy. There's +currently no "clean" way (pkg-config comes to mind) how to find this path. You +can try the following (GNU-specific) hack (don't forget to link against *dl*): + +.. 
code-block:: c + + #if !(_GNU_SOURCE) + #define _GNU_SOURCE + #endif + + #include + #include + #include + + // caller should free returned pointer to avoid memleaks + // returns NULL on error + char* guess_pypyhome() { + // glibc-only (dladdr is why we #define _GNU_SOURCE) + Dl_info info; + void *_rpython_startup_code = dlsym(0,"rpython_startup_code"); + if (_rpython_startup_code == 0) { + return 0; + } + if (dladdr(_rpython_startup_code, &info) != 0) { + const char* lib_path = info.dli_fname; + char* lib_realpath = realpath(lib_path, 0); + return lib_realpath; + } + return 0; + } + + Threading --------- diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -17,7 +17,7 @@ Read more in the `EuroPython 2006 sprint report`_. -.. _`EuroPython 2006 sprint report`: http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt +.. _`EuroPython 2006 sprint report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt PyPy at XP 2006 and Agile 2006 ================================================================== @@ -41,8 +41,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. _`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.html +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy sprint at Akihabara (Tokyo, Japan) ================================================================== @@ -84,8 +84,8 @@ Read the report_ and the original announcement_. -.. _report: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.html -.. 
_announcement: http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/sprint-announcement.html +.. _report: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt +.. _announcement: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/sprint-announcement.txt PyCon Sprint 2006 (Dallas, Texas, USA) ================================================================== @@ -114,7 +114,7 @@ said they were interested in the outcome and would keep an eye on its progress. Read the `talk slides`_. -.. _`talk slides`: http://codespeak.net/pypy/extradoc/talk/solutions-linux-paris-2006.html +.. _`talk slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/solutions-linux-paris-2006.html PyPy Sprint in Palma De Mallorca 23rd - 29th January 2006 @@ -129,9 +129,9 @@ for the first three days and `one for the rest of the sprint`_. -.. _`the announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/sprint-announcement.html -.. _`sprint report`: http://codespeak.net/pipermail/pypy-dev/2006q1/002746.html -.. _`one for the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2006q1/002749.html +.. _`the announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/sprint-announcement.txt +.. _`sprint report`: https://mail.python.org/pipermail/pypy-dev/2006-January/002746.html +.. _`one for the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2006-January/002749.html Preliminary EU reports released =============================== @@ -155,8 +155,8 @@ Michael and Carl have written a `report about the first half`_ and `one about the second half`_ of the sprint. *(12/18/2005)* -.. _`report about the first half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002656.html -.. _`one about the second half`: http://codespeak.net/pipermail/pypy-dev/2005q4/002660.html +.. _`report about the first half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002656.html +.. 
_`one about the second half`: https://mail.python.org/pipermail/pypy-dev/2005-December/002660.html PyPy release 0.8.0 =================== @@ -187,12 +187,12 @@ way back. *(10/18/2005)* -.. _`Logilab offices in Paris`: http://codespeak.net/pypy/extradoc/sprintinfo/paris-2005-sprint.html +.. _`Logilab offices in Paris`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris-2005-sprint.txt .. _JIT: http://en.wikipedia.org/wiki/Just-in-time_compilation .. _`continuation-passing`: http://en.wikipedia.org/wiki/Continuation_passing_style -.. _`report about day one`: http://codespeak.net/pipermail/pypy-dev/2005q4/002510.html -.. _`one about day two and three`: http://codespeak.net/pipermail/pypy-dev/2005q4/002512.html -.. _`the rest of the sprint`: http://codespeak.net/pipermail/pypy-dev/2005q4/002514.html +.. _`report about day one`: https://mail.python.org/pipermail/pypy-dev/2005-October/002510.html +.. _`one about day two and three`: https://mail.python.org/pipermail/pypy-dev/2005-October/002512.html +.. _`the rest of the sprint`: https://mail.python.org/pipermail/pypy-dev/2005-October/002514.html PyPy release 0.7.0 =================== @@ -217,15 +217,13 @@ Its main focus is translation of the whole PyPy interpreter to a low level language and reaching 2.4.1 Python compliance. The goal of the sprint is to release a first self-contained -PyPy-0.7 version. Carl has written a report about `day 1 - 3`_, -there are `some pictures`_ online and a `heidelberg summary report`_ -detailing some of the works that led to the successful release -of `pypy-0.7.0`_! +PyPy-0.7 version. Carl has written a report about `day 1 - 3`_ +and a `heidelberg summary report`_ detailing some of the works +that led to the successful release of `pypy-0.7.0`_! -.. _`heidelberg summary report`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.html -.. _`PyPy sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-sprint.html -.. 
_`day 1 - 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002287.html -.. _`some pictures`: http://codespeak.net/~hpk/heidelberg-sprint/ +.. _`heidelberg summary report`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt +.. _`PyPy sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-sprint.txt +.. _`day 1 - 3`: https://mail.python.org/pipermail/pypy-dev/2005-August/002287.html PyPy Hildesheim2 finished: first self-contained PyPy run! =========================================================== @@ -233,20 +231,16 @@ Up until 31st August we were in a PyPy sprint at `Trillke-Gut`_. Carl has written a `report about day 1`_, Holger about `day 2 and day 3`_ and Carl again about `day 4 and day 5`_, -On `day 6`_ Holger reports the `breakthrough`_: PyPy runs -on its own! Hurray_!. And Carl finally reports about the winding +On `day 6`_ Holger reports the breakthrough: PyPy runs +on its own! Hurray!. And Carl finally reports about the winding down of `day 7`_ which saw us relaxing, discussing and generally -having a good time. You might want to look at the selected -`pictures from the sprint`_. +having a good time. -.. _`report about day 1`: http://codespeak.net/pipermail/pypy-dev/2005q3/002217.html -.. _`day 2 and day 3`: http://codespeak.net/pipermail/pypy-dev/2005q3/002220.html -.. _`day 4 and day 5`: http://codespeak.net/pipermail/pypy-dev/2005q3/002234.html -.. _`day 6`: http://codespeak.net/pipermail/pypy-dev/2005q3/002239.html -.. _`day 7`: http://codespeak.net/pipermail/pypy-dev/2005q3/002245.html -.. _`breakthrough`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Thumbnails/36.jpg -.. _`hurray`: http://codespeak.net/~hpk/hildesheim2-sprint-www/hildesheim2-sprint-www-Pages/Image37.html -.. _`pictures from the sprint`: http://codespeak.net/~hpk/hildesheim2-sprint-www/ +.. _`report about day 1`: https://mail.python.org/pipermail/pypy-dev/2005-July/002217.html +.. 
_`day 2 and day 3`: https://mail.python.org/pipermail/pypy-dev/2005-July/002220.html +.. _`day 4 and day 5`: https://mail.python.org/pipermail/pypy-dev/2005-July/002234.html +.. _`day 6`: https://mail.python.org/pipermail/pypy-dev/2005-July/002239.html +.. _`day 7`: https://mail.python.org/pipermail/pypy-dev/2005-August/002245.html .. _`Trillke-Gut`: http://www.trillke.net EuroPython 2005 sprints finished @@ -264,15 +258,15 @@ the LLVM backends and type inference in general. *(07/13/2005)* -.. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html -.. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html -.. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev +.. _`day 1`: https://mail.python.org/pipermail/pypy-dev/2005-June/002169.html +.. _`day 2`: https://mail.python.org/pipermail/pypy-dev/2005-June/002171.html +.. _`day 3`: https://mail.python.org/pipermail/pypy-dev/2005-June/002172.html +.. _`pypy-dev`: https://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html -.. _`sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-announcement.html -.. _`list of people coming`: http://codespeak.net/pypy/extradoc/sprintinfo/EP2005-people.html +.. _`sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-announcement.html +.. _`list of people coming`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/EP2005-people.html Duesseldorf PyPy sprint 2-9 June 2006 ================================================================== @@ -285,8 +279,8 @@ Read more in `the sprint announcement`_, see who is planning to attend on the `people page`_. -.. _`the sprint announcement`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/announce.html -.. _`people page`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/people.html +.. 
_`the sprint announcement`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/announce.txt +.. _`people page`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/people.txt PyPy at XP 2006 and Agile 2006 diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -66,58 +66,26 @@ Reflex ====== -This method is still experimental. It adds the `cppyy`_ module. -The method works by using the `Reflex package`_ to provide reflection -information of the C++ code, which is then used to automatically generate -bindings at runtime. -From a python standpoint, there is no difference between generating bindings -at runtime, or having them "statically" generated and available in scripts -or compiled into extension modules: python classes and functions are always -runtime structures, created when a script or module loads. +The builtin `cppyy`_ module uses reflection information, provided by +`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +automatically generate bindings at runtime. +In Python, classes and functions are always runtime structures, so when they +are generated matters not for performance. However, if the backend itself is capable of dynamic behavior, it is a much -better functional match to python, allowing tighter integration and more -natural language mappings. -Full details are `available here`_. +better functional match, allowing tighter integration and more natural +language mappings. + +The `cppyy`_ module is written in RPython, thus PyPy's JIT is able to remove +most cross-language call overhead. + +`Full details`_ are `available here`_. .. _`cppyy`: cppyy.html -.. _`reflex-support`: cppyy.html -.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex +.. _`installed separately`: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 +.. _`Reflex`: http://root.cern.ch/drupal/content/reflex +.. _`Full details`: cppyy.html .. 
_`available here`: cppyy.html -Pros ----- - -The cppyy module is written in RPython, which makes it possible to keep the -code execution visible to the JIT all the way to the actual point of call into -C++, thus allowing for a very fast interface. -Reflex is currently in use in large software environments in High Energy -Physics (HEP), across many different projects and packages, and its use can be -virtually completely automated in a production environment. -One of its uses in HEP is in providing language bindings for CPython. -Thus, it is possible to use Reflex to have bound code work on both CPython and -on PyPy. -In the medium-term, Reflex will be replaced by `cling`_, which is based on -`llvm`_. -This will affect the backend only; the python-side interface is expected to -remain the same, except that cling adds a lot of dynamic behavior to C++, -enabling further language integration. - -.. _`cling`: http://root.cern.ch/drupal/content/cling -.. _`llvm`: http://llvm.org/ - -Cons ----- - -C++ is a large language, and cppyy is not yet feature-complete. -Still, the experience gained in developing the equivalent bindings for CPython -means that adding missing features is a simple matter of engineering, not a -question of research. -The module is written so that currently missing features should do no harm if -you don't use them, if you do need a particular feature, it may be necessary -to work around it in python or with a C++ helper function. -Although Reflex works on various platforms, the bindings with PyPy have only -been tested on Linux. - RPython Mixed Modules ===================== diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -79,7 +79,7 @@ .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. 
_`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf -.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 +.. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf @@ -87,7 +87,7 @@ .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7 .. _`Automatic generation of JIT compilers for dynamic languages in .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ecoop2009/main.pdf .. _`Core Object Optimization Results`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D06.1_Core_Optimizations-2007-04-30.pdf -.. _`Compiling Dynamic Language Implementations`: http://codespeak.net/pypy/extradoc/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf +.. _`Compiling Dynamic Language Implementations`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D05.1_Publish_on_translating_a_very-high-level_description.pdf Talks and Presentations @@ -258,24 +258,24 @@ .. _`PyCon 2010`: http://morepypy.blogspot.com/2010/02/pycon-2010-report.html .. _`RuPy 2009`: http://morepypy.blogspot.com/2009/11/pypy-on-rupy-2009.html -.. _`PyPy 3000`: http://codespeak.net/pypy/extradoc/talk/ep2006/pypy3000.txt -.. 
_`What can PyPy do for you`: http://codespeak.net/pypy/extradoc/talk/ep2006/usecases-slides.html -.. _`PyPy introduction at EuroPython 2006`: http://codespeak.net/pypy/extradoc/talk/ep2006/intro.pdf -.. _`PyPy - the new Python implementation on the block`: http://codespeak.net/pypy/extradoc/talk/22c3/hpk-tech.html -.. _`PyPy development method`: http://codespeak.net/pypy/extradoc/talk/pycon2006/method_talk.html -.. _`PyPy intro`: http://codespeak.net/pypy/extradoc/talk/accu2006/accu-2006.pdf -.. _oscon2003-paper: http://codespeak.net/pypy/extradoc/talk/oscon2003-paper.html -.. _`Architecture introduction slides`: http://codespeak.net/pypy/extradoc/talk/amsterdam-sprint-intro.pdf -.. _`EU funding for FOSS`: http://codespeak.net/pypy/extradoc/talk/2004-21C3-pypy-EU-hpk.pdf -.. _`py lib slides`: http://codespeak.net/pypy/extradoc/talk/2005-pycon-py.pdf -.. _`PyCon 2005`: http://codespeak.net/pypy/extradoc/talk/pypy-talk-pycon2005/README.html -.. _`Trouble in Paradise`: http://codespeak.net/pypy/extradoc/talk/agile2006/during-oss-sprints_talk.pdf -.. _`Sprint Driven Development`: http://codespeak.net/pypy/extradoc/talk/xp2006/during-xp2006-sprints.pdf -.. _`Kill -1`: http://codespeak.net/pypy/extradoc/talk/ep2006/kill_1_agiletalk.pdf -.. _`Open Source, EU-Funding and Agile Methods`: http://codespeak.net/pypy/extradoc/talk/22c3/agility.pdf -.. _`PyPy Status`: http://codespeak.net/pypy/extradoc/talk/vancouver/talk.html +.. _`PyPy 3000`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/pypy3000.txt +.. _`What can PyPy do for you`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/usecases-slides.txt +.. _`PyPy introduction at EuroPython 2006`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/intro.pdf +.. _`PyPy - the new Python implementation on the block`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/hpk-tech.txt +.. _`PyPy development method`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2006/method_talk.txt +.. 
_`PyPy intro`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/accu2006/accu-2006.pdf +.. _oscon2003-paper: https://bitbucket.org/pypy/extradoc/raw/tip/talk/oscon2003-paper.txt +.. _`Architecture introduction slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/amsterdam-sprint-intro.pdf +.. _`EU funding for FOSS`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2004-21C3-pypy-EU-hpk.pdf +.. _`py lib slides`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/2005-pycon-py.pdf +.. _`PyCon 2005`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pypy-talk-pycon2005/README.txt +.. _`Trouble in Paradise`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/agile2006/during-oss-sprints_talk.pdf +.. _`Sprint Driven Development`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/xp2006/during-xp2006-sprints.pdf +.. _`Kill -1`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2006/kill_1_agiletalk.pdf +.. _`Open Source, EU-Funding and Agile Methods`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/22c3/agility.pdf +.. _`PyPy Status`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/vancouver/ .. _`Sprinting the PyPy way`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2005/pypy_sprinttalk_ep2005bd.pdf -.. _`PyPy's VM Approach`: http://codespeak.net/pypy/extradoc/talk/dls2006/talk.html +.. _`PyPy's VM Approach`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/ .. _`PyPy's approach to virtual machine construction`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dls2006/pypy-vm-construction.pdf .. _`EuroPython talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/ep2009/ .. _`PyCon talks 2009`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/pycon2009/ @@ -356,8 +356,6 @@ .. _`transparent dynamic optimization`: http://www.hpl.hp.com/techreports/1999/HPL-1999-77.pdf .. _Dynamo: http://www.hpl.hp.com/techreports/1999/HPL-1999-78.pdf .. _testdesign: coding-guide.html#test-design -.. 
_feasible: http://codespeak.net/pipermail/pypy-dev/2004q2/001289.html -.. _rock: http://codespeak.net/pipermail/pypy-dev/2004q1/001255.html .. _LLVM: http://llvm.org/ .. _IronPython: http://ironpython.codeplex.com/ .. _`Dynamic Native Optimization of Native Interpreters`: http://people.csail.mit.edu/gregs/dynamorio.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -318,7 +318,7 @@ To read more about the RPython limitations read the `RPython description`_. -.. _`RPython description`: coding-guide.html#restricted-python +.. _`RPython description`: coding-guide.html#rpython-definition --------------------------------------------------------------- Does RPython have anything to do with Zope's Restricted Python? @@ -429,12 +429,27 @@ Could we use LLVM? ------------------ -There is a (static) translation backend using LLVM in the branch -``llvm-translation-backend``. It can translate PyPy with or without the JIT on -Linux. +In theory yes. But we tried to use it 5 or 6 times already, as a +translation backend or as a JIT backend --- and failed each time. -Using LLVM as our JIT backend looks interesting as well -- we made an attempt, -but it failed: LLVM has no way to patch the generated machine code. +In more details: using LLVM as a (static) translation backend is +pointless nowadays because you can generate C code and compile it with +clang. (Note that compiling PyPy with clang gives a result that is not +faster than compiling it with gcc.) We might in theory get extra +benefits from LLVM's GC integration, but this requires more work on the +LLVM side before it would be remotely useful. Anyway, it could be +interfaced via a custom primitive in the C code. (The latest such +experimental backend is in the branch ``llvm-translation-backend``, +which can translate PyPy with or without the JIT on Linux.) 
+ +On the other hand, using LLVM as our JIT backend looks interesting as +well --- but again we made an attempt, and it failed: LLVM has no way to +patch the generated machine code. + +So the position of the core PyPy developers is that if anyone wants to +make an N+1'th attempt with LLVM, they are welcome, and will be happy to +provide help in the IRC channel, but they are left with the burden of proof +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? @@ -444,6 +459,19 @@ .. _`getting-started`: getting-started-python.html +------------------------------------------ +Compiling PyPy swaps or runs out of memory +------------------------------------------ + +This is documented (here__ and here__). It needs 4 GB of RAM to run +"rpython targetpypystandalone" on top of PyPy, a bit more when running +on CPython. If you have less than 4 GB it will just swap forever (or +fail if you don't have enough swap). On 32-bit, divide the numbers by +two. + +.. __: http://pypy.org/download.html#building-from-source +.. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter + .. _`how do I compile my own interpreters`: ------------------------------------- diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -14,7 +14,7 @@ The present document describes the specific garbage collectors that we wrote in our framework. -.. _`EU-report on this topic`: http://codespeak.net/pypy/extradoc/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf +.. 
_`EU-report on this topic`: https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf Garbage collectors currently written for the GC framework diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -222,7 +222,7 @@ PyPy development always was and is still thoroughly test-driven. We use the flexible `py.test testing tool`_ which you can `install independently -`_ and use for other projects. +`_ and use for other projects. The PyPy source tree comes with an inlined version of ``py.test`` which you can invoke by typing:: @@ -264,7 +264,7 @@ interpreter. .. _`py.test testing tool`: http://pytest.org -.. _`py.test usage and invocations`: http://pytest.org/usage.html#usage +.. _`py.test usage and invocations`: http://pytest.org/latest/usage.html#usage Special Introspection Features of the Untranslated Python Interpreter --------------------------------------------------------------------- @@ -389,7 +389,7 @@ .. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html -.. _`py library`: http://pylib.org +.. _`py library`: http://pylib.readthedocs.org/ .. _`Spidermonkey`: http://www.mozilla.org/js/spidermonkey/ diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -76,7 +76,7 @@ .. 
code-block:: console $ curl -O http://python-distribute.org/distribute_setup.py - $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py + $ curl -O https://raw.githubusercontent.com/pypa/pip/master/contrib/get-pip.py $ ./pypy-2.1/bin/pypy distribute_setup.py $ ./pypy-2.1/bin/pypy get-pip.py $ ./pypy-2.1/bin/pip install pygments # for example diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -1,5 +1,3 @@ -.. include:: needswork.txt - .. _glossary: ******** diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,10 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* update README +* merge PYPY_IRC_TOPIC environment variable handling from previous release + in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and + pypy/interpreter/app_main.py so release versions will not print a random + IRC topic by default. * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -92,7 +92,9 @@ `D07.1 Massive Parallelism and Translation Aspects`_ is a report about PyPy's optimization efforts, garbage collectors and massive parallelism (stackless) features. This report refers to the paper `PyPy's approach -to virtual machine construction`_. *(2007-02-28)* +to virtual machine construction`_. Extends the content previously +available in the document "Memory management and threading models as +translation aspects -- solutions and challenges". 
*(2007-02-28)* diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -103,7 +103,7 @@ .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org -.. _here: http://tismerysoft.de/pypy/irc-logs/pypy +.. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html diff --git a/pypy/doc/interpreter.rst b/pypy/doc/interpreter.rst --- a/pypy/doc/interpreter.rst +++ b/pypy/doc/interpreter.rst @@ -256,7 +256,7 @@ example and the higher level `chapter on Modules in the coding guide`_. -.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/tip/pypy/module/__builtin__/ +.. _`__builtin__ module`: https://bitbucket.org/pypy/pypy/src/default/pypy/module/__builtin__/ .. _`chapter on Modules in the coding guide`: coding-guide.html#modules .. _`Gateway classes`: diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. 
__: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -185,6 +185,6 @@ .. _`standard object space`: objspace.html#the-standard-object-space .. [D12.1] `High-Level Backends and Interpreter Feature Prototypes`, PyPy - EU-Report, 2007, http://codespeak.net/pypy/extradoc/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf + EU-Report, 2007, https://bitbucket.org/pypy/extradoc/raw/tip/eu-report/D12.1_H-L-Backends_and_Feature_Prototypes-2007-03-22.pdf .. include:: _ref.txt diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -339,44 +339,35 @@ Object types ------------ -The larger part of the `pypy/objspace/std/`_ package defines and implements the -library of Python's standard built-in object types. Each type (int, float, -list, tuple, str, type, etc.) is typically implemented by two modules: +The larger part of the `pypy/objspace/std/`_ package defines and +implements the library of Python's standard built-in object types. Each +type ``xxx`` (int, float, list, tuple, str, type, etc.) is typically +implemented in the module ``xxxobject.py``. -* the *type specification* module, which for a type ``xxx`` is called ``xxxtype.py``; +The ``W_AbstractXxxObject`` class, when present, is the abstract base +class, which mainly defines what appears on the Python-level type +object. There are then actual implementations as subclasses, which are +called ``W_XxxObject`` or some variant for the cases where we have +several different implementations. 
For example, +`pypy/objspace/std/bytesobject.py`_ defines ``W_AbstractBytesObject``, +which contains everything needed to build the ``str`` app-level type; +and there are subclasses ``W_BytesObject`` (the usual string) and +``W_StringBufferObject`` (a special implementation tweaked for repeated +additions, in `pypy/objspace/std/strbufobject.py`_). For mutable data +types like lists and dictionaries, we have a single class +``W_ListObject`` or ``W_DictMultiObject`` which has an indirection to +the real data and a strategy; the strategy can change as the content of +the object changes. -* the *implementation* module, called ``xxxobject.py``. - -The ``xxxtype.py`` module basically defines the type object itself. For -example, `pypy/objspace/std/listtype.py`_ contains the specification of the object you get when -you type ``list`` in a PyPy prompt. `pypy/objspace/std/listtype.py`_ enumerates the methods -specific to lists, like ``append()``. - -A particular method implemented by all types is the ``__new__()`` special -method, which in Python's new-style-classes world is responsible for creating -an instance of the type. In PyPy, ``__new__()`` locates and imports the module -implementing *instances* of the type, and creates such an instance based on the -arguments the user supplied to the constructor. For example, `pypy/objspace/std/tupletype.py`_ -defines ``__new__()`` to import the class ``W_TupleObject`` from -`pypy/objspace/std/tupleobject.py`_ and instantiate it. The `pypy/objspace/std/tupleobject.py`_ then contains a -"real" implementation of tuples: the way the data is stored in the -``W_TupleObject`` class, how the operations work, etc. - -The goal of the above module layout is to cleanly separate the Python -type object, visible to the user, and the actual implementation of its -instances. It is possible to provide *several* implementations of the -instances of the same Python type, by writing several ``W_XxxObject`` -classes. 
Every place that instantiates a new object of that Python type -can decide which ``W_XxxObject`` class to instantiate. - -From the user's point of view, the multiple internal ``W_XxxObject`` -classes are not visible: they are still all instances of exactly the -same Python type. PyPy knows that (e.g.) the application-level type of -its interpreter-level ``W_StringObject`` instances is str because -there is a ``typedef`` class attribute in ``W_StringObject`` which -points back to the string type specification from `pypy/objspace/std/stringtype.py`_; all -other implementations of strings use the same ``typedef`` from -`pypy/objspace/std/stringtype.py`_. +From the user's point of view, even when there are several +``W_AbstractXxxObject`` subclasses, this is not visible: at the +app-level, they are still all instances of exactly the same Python type. +PyPy knows that (e.g.) the application-level type of its +interpreter-level ``W_BytesObject`` instances is str because there is a +``typedef`` class attribute in ``W_BytesObject`` which points back to +the string type specification from `pypy/objspace/std/bytesobject.py`_; +all other implementations of strings use the same ``typedef`` from +`pypy/objspace/std/bytesobject.py`_. For other examples of multiple implementations of the same Python type, see `Standard Interpreter Optimizations`_. @@ -387,6 +378,9 @@ Multimethods ------------ +*Note: multimethods are on the way out. Although they look cool, +they failed to provide enough benefits.* + The Standard Object Space allows multiple object implementations per Python type - this is based on multimethods_. 
For a description of the multimethod variant that we implemented and which features it supports, @@ -491,7 +485,7 @@ Introduction ------------ -The task of the FlowObjSpace (the source is at `pypy/objspace/flow/`_) is to generate a control-flow graph from a +The task of the FlowObjSpace (the source is at `rpython/flowspace/`_) is to generate a control-flow graph from a function. This graph will also contain a trace of the individual operations, so that it is actually just an alternate representation for the function. diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.0.rst @@ -0,0 +1,146 @@ +======================================= +PyPy 2.3 - Easier Than Ever +======================================= + +We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python +language. This release updates the stdlib from 2.7.3, jumping directly to 2.7.6. + +This release also contains several bugfixes and performance improvements, +many generated by real users finding corner cases our `TDD`_ methods missed. +`CFFI`_ has made it easier than ever to use existing C code with both cpython +and PyPy, easing the transition for packages like `cryptography`_, `Pillow`_ +(Python Imaging Library [Fork]), a basic port of `pygame-cffi`_, and others. + +PyPy can now be embedded in a hosting application, for instance inside `uWSGI`_ + +You can download the PyPy 2.3 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We showed quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.2 is imminent. 
+ +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs + +* `NumPy`_ which is included in the PyPy 2.3 release. More details below. + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _ `NumPy`: http://pypy.org/numpydonate.html +.. _`TDD`: http://doc.pypy.org/en/latest/how-to-contribute.html +.. _`CFFI`: http://cffi.readthedocs.org +.. _`cryptography`: https://cryptography.io +.. _`Pillow`: https://pypi.python.org/pypi/Pillow/2.4.0 +.. _`pygame-cffi`: https://github.com/CTPUG/pygame_cffi +.. _`uWSGI`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Bugfixes +-------- + +Many issues were cleaned up after being reported by users to https://bugs.pypy.org or on IRC at #pypy. Note that we consider +performance slowdowns as bugs. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* The ARM port no longer crashes on unaligned memory access to floats and doubles, + and singlefloats are supported in the JIT. 
+ +* Generators are faster since they now skip unnecessary cleanup + +* A first time contributor simplified JIT traces by adding integer bound + propagation in indexing and logical operations. + +* Optimize consecutive dictionary lookups of the same key in a chain + +* Our extensive pre-translation test suite now runs nightly on more platforms + +* Fix issues with reimporting builtin modules + +* Fix an RPython bug with loop-unrolling that appeared in the `HippyVM`_ PHP port + +* Support for corner cases on objects with __int__ and __float__ methods + +* Fix multithreaded support for gethostbyname_ex and gethostbyaddr + +* Fix handling of tp_name for type objects + +.. _`HippyVM`: http://www.hippyvm.com +.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html + + +New Platforms and Features +-------------------------- + +* Support for OpenBSD + +* Code cleanup: we continue to prune out old and unused code, and to refactor + large parts of the codebase. We have separated RPython from the PyPy python + interpreter, and RPython is seeing use in other dynamic language projects. + +* Support for precompiled headers in the build process for MSVC + +* Tweak support of errno in cpyext (the PyPy implemenation of the capi) + + +NumPy +----- +NumPy support has been split into a builtin ``_numpy`` module and a +fork of the NumPy code base adapted to PyPy at + ``https://bitbucket.org/pypy/numpy``. +You need to install NumPy separately with a virtualenv: + ``pip install git+https://bitbucket.org/pypy/numpy.git``; + or directly: + ``git clone https://bitbucket.org/pypy/numpy.git``; + ``cd numpy``; ``pypy setup.py install``. + +* NumPy support has been improved, many failures in indexing, dtypes, + and scalars were corrected. We are slowly approaching our goal of passing + the NumPy test suite. We still do not support object or unicode ndarrays. + +* Speed of iteration in dot() is now within 1.5x of the NumPy c + implementation (without BLAS acceleration). 
Since the same array + iterator is used throughout the ``_numpy`` module, speed increases should + be apparent in all NumPy functionality. + +* Most of the core functionality of nditer has been implemented. + +* A cffi-based ``numpy.random`` module is available as a branch; + it will be merged soon after this release. + +* Enhancements to the PyPy JIT were made to support virtualizing the raw_store/raw_load + memory operations used in NumPy arrays. Further work remains here in virtualizing the + alloc_raw_storage when possible. This will allow scalars to have storages but still be + virtualized when possible in loops. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/cbuild.py Types @@ -56,7 +56,7 @@ flavor='raw'. There are several helpers like string -> char* converter, refer to the source for details. -.. _rffi: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/lltypesystem/rffi.py +.. _rffi: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/lltypesystem/rffi.py Registering function as external --------------------------------- @@ -68,4 +68,4 @@ functions, passing llimpl as an argument and eventually llfakeimpl as a fake low-level implementation for tests performed by an llinterp. -.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py +.. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/extfunc.py diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst --- a/pypy/doc/sandbox.rst +++ b/pypy/doc/sandbox.rst @@ -42,6 +42,10 @@ use this sandboxed PyPy from a regular Python interpreter (CPython, or an unsandboxed PyPy). Contributions welcome. 
+.. warning:: + + Tested with PyPy2. May not work out of the box with PyPy3. + Overview -------- diff --git a/pypy/doc/sprint-reports.rst b/pypy/doc/sprint-reports.rst --- a/pypy/doc/sprint-reports.rst +++ b/pypy/doc/sprint-reports.rst @@ -42,30 +42,30 @@ * `CERN (July 2010)`_ * `Düsseldorf (October 2010)`_ - .. _Hildesheim (Feb 2003): http://codespeak.net/pypy/extradoc/sprintinfo/HildesheimReport.html - .. _Gothenburg (May 2003): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2003-sprintreport.txt - .. _LovainLaNeuve (June 2003): http://codespeak.net/pypy/extradoc/sprintinfo/LouvainLaNeuveReport.txt - .. _Berlin (Sept 2003): http://codespeak.net/pypy/extradoc/sprintinfo/BerlinReport.txt - .. _Amsterdam (Dec 2003): http://codespeak.net/pypy/extradoc/sprintinfo/AmsterdamReport.txt - .. _Vilnius (Nov 2004): http://codespeak.net/pypy/extradoc/sprintinfo/vilnius-2004-sprintreport.txt - .. _Leysin (Jan 2005): http://codespeak.net/pypy/extradoc/sprintinfo/LeysinReport.txt - .. _PyCon/Washington (March 2005): http://codespeak.net/pypy/extradoc/sprintinfo/pycon_sprint_report.txt - .. _Europython/Gothenburg (June 2005): http://codespeak.net/pypy/extradoc/sprintinfo/ep2005-sprintreport.txt - .. _Hildesheim (July 2005): http://codespeak.net/pypy/extradoc/sprintinfo/hildesheim2005-sprintreport.txt - .. _Heidelberg (Aug 2005): http://codespeak.net/pypy/extradoc/sprintinfo/Heidelberg-report.txt - .. _Paris (Oct 2005): http://codespeak.net/pypy/extradoc/sprintinfo/paris/paris-report.txt - .. _Gothenburg (Dec 2005): http://codespeak.net/pypy/extradoc/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt - .. _Mallorca (Jan 2006): http://codespeak.net/pypy/extradoc/sprintinfo/mallorca/mallorca-sprintreport.txt - .. _LouvainLaNeuve (March 2006): http://codespeak.net/pypy/extradoc/sprintinfo/louvain-la-neuve-2006/report.txt - .. _Leysin (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2006-sprintreport.txt - .. 
_Tokyo (April 2006): http://codespeak.net/pypy/extradoc/sprintinfo/tokyo/sprint-report.txt - .. _Düsseldorf (June 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006/report1.txt - .. _Europython/Geneva (July 2006): http://codespeak.net/pypy/extradoc/sprintinfo/post-ep2006/report.txt - .. _Düsseldorf (October 2006): http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2006b/report.txt - .. _`Leysin (January 2007)`: http://codespeak.net/pypy/extradoc/sprintinfo/leysin-winter-2007/report.txt - .. _Hildesheim (Feb 2007): http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/sprint-report.txt - .. _`EU report writing sprint`: http://codespeak.net/pypy/extradoc/sprintinfo/trillke-2007/eu-report-sprint-report.txt - .. _`PyCon/Dallas (Feb 2006)`: http://codespeak.net/pypy/extradoc/sprintinfo/pycon06/sprint-report.txt + .. _Hildesheim (Feb 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/HildesheimReport.txt + .. _Gothenburg (May 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2003-sprintreport.txt + .. _LovainLaNeuve (June 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LouvainLaNeuveReport.txt + .. _Berlin (Sept 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/BerlinReport.txt + .. _Amsterdam (Dec 2003): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/AmsterdamReport.txt + .. _Vilnius (Nov 2004): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/vilnius-2004-sprintreport.txt + .. _Leysin (Jan 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/LeysinReport.txt + .. _PyCon/Washington (March 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon_sprint_report.txt + .. _Europython/Gothenburg (June 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ep2005-sprintreport.txt + .. _Hildesheim (July 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/hildesheim2005-sprintreport.txt + .. 
_Heidelberg (Aug 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/Heidelberg-report.txt + .. _Paris (Oct 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/paris/paris-report.txt + .. _Gothenburg (Dec 2005): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/gothenburg-2005/gothenburg-dec2005-sprintreport.txt + .. _Mallorca (Jan 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/mallorca/mallorca-sprintreport.txt + .. _LouvainLaNeuve (March 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/louvain-la-neuve-2006/report.txt + .. _Leysin (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2006-sprintreport.txt + .. _Tokyo (April 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/tokyo/sprint-report.txt + .. _Düsseldorf (June 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006/report1.txt + .. _Europython/Geneva (July 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/post-ep2006/report.txt + .. _Düsseldorf (October 2006): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/ddorf2006b/report.txt + .. _`Leysin (January 2007)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/leysin-winter-2007/report.txt + .. _Hildesheim (Feb 2007): https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/sprint-report.txt + .. _`EU report writing sprint`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/trillke-2007/eu-report-sprint-report.txt + .. _`PyCon/Dallas (Feb 2006)`: https://bitbucket.org/pypy/extradoc/raw/tip/sprintinfo/pycon06/sprint-report.txt .. _`Göteborg (November 2007)`: http://morepypy.blogspot.com/2007_11_01_archive.html .. _`Leysin (January 2008)`: http://morepypy.blogspot.com/2008/01/leysin-winter-sport-sprint-started.html .. 
_`Berlin (May 2008)`: http://morepypy.blogspot.com/2008_05_01_archive.html diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst --- a/pypy/doc/stackless.rst +++ b/pypy/doc/stackless.rst @@ -211,6 +211,9 @@ .. __: `recursion depth limit`_ +We also do not include any of the recent API additions to Stackless +Python, like ``set_atomic()``. Contributions welcome. + Recursion depth limit +++++++++++++++++++++ diff --git a/pypy/doc/statistic/index.rst b/pypy/doc/statistic/index.rst --- a/pypy/doc/statistic/index.rst +++ b/pypy/doc/statistic/index.rst @@ -1,3 +1,7 @@ +.. warning:: + + This page is no longer updated, of historical interest only. + ======================= PyPy Project Statistics ======================= diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. 
This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -380,7 +380,7 @@ The RPython Typer ================= -https://bitbucket.org/pypy/pypy/src/default/pypy/rpython/ +https://bitbucket.org/pypy/pypy/src/default/rpython/rtyper/ The RTyper is the first place where the choice of backend makes a difference; as outlined above we are assuming that ANSI C is the target. @@ -603,7 +603,7 @@ From noreply at buildbot.pypy.org Wed May 7 12:55:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 12:55:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Avoid importing cycle Message-ID: <20140507105542.1F2221D23F1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71367:e7f570769ca1 Date: 2014-05-07 12:54 +0200 http://bitbucket.org/pypy/pypy/changeset/e7f570769ca1/ Log: Avoid importing cycle diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -31,7 +31,6 @@ 'signals_enabled': 'app_signal.signals_enabled', 'atomic': 'app_atomic.atomic', 'exclusive_atomic': 'app_atomic.exclusive_atomic', - 'error': 'app_atomic.error', } interpleveldefs = { '_signals_enter': 'interp_signal.signals_enter', @@ -42,6 +41,7 @@ 'longest_abort_info': 'interp_atomic.longest_abort_info', 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', + 'error': 'space.fromcache(pypy.module.thread.error.Cache).w_error', } def activate(self, space): return self.space.config.objspace.usemodules.thread diff --git 
a/pypy/module/__pypy__/app_atomic.py b/pypy/module/__pypy__/app_atomic.py --- a/pypy/module/__pypy__/app_atomic.py +++ b/pypy/module/__pypy__/app_atomic.py @@ -1,4 +1,3 @@ -from thread import error # re-exported from __pypy__ import thread class Atomic(object): From noreply at buildbot.pypy.org Wed May 7 15:12:23 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 7 May 2014 15:12:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: does_any_allocation in rewrite should be cleared on labels Message-ID: <20140507131223.B56201C3569@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71368:9e10d6372feb Date: 2014-05-07 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9e10d6372feb/ Log: does_any_allocation in rewrite should be cleared on labels diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -36,6 +36,8 @@ _previous_size = -1 _op_malloc_nursery = None _v_last_malloced_nursery = None + + # does_any_alloc tells us if we did any allocation since the last LABEL does_any_allocation = False def __init__(self, gc_ll_descr, cpu): @@ -64,6 +66,7 @@ elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() self.known_lengths.clear() + self.does_any_allocation = False # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1267,3 +1267,19 @@ i2 = stm_should_break_transaction(0) jump(i1, i2) """) + + def test_label_stm_should_break_allocation(self): + self.check_rewrite(""" + [] + p2 = new(descr=tdescr) + label() + i1 = stm_should_break_transaction(0) + jump(i1) + 
""", """ + [] + p2 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) + label() + i1 = stm_should_break_transaction(1) + jump(i1) + """) From noreply at buildbot.pypy.org Wed May 7 15:12:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 7 May 2014 15:12:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: revert the additional argument of stm_should_break_transaction() Message-ID: <20140507131224.F248D1C3569@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71369:6c68d7afe0c1 Date: 2014-05-07 13:52 +0200 http://bitbucket.org/pypy/pypy/changeset/6c68d7afe0c1/ Log: revert the additional argument of stm_should_break_transaction() diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -121,11 +121,8 @@ self.read_barrier_applied[v_ptr] = None def handle_should_break_transaction(self, op): - op1 = ResOperation(rop.STM_SHOULD_BREAK_TRANSACTION, - [ConstInt(not self.does_any_allocation)], - op.result) - self.newops.append(op1) - self.does_any_allocation = True + self.newops.append(op) + #self.does_any_allocation = True def must_apply_write_barrier(self, val, v=None): diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1233,11 +1233,11 @@ def test_stm_should_break_transaction_no_malloc(self): self.check_rewrite(""" [] - i1 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() jump(i1) """, """ [] - i1 = stm_should_break_transaction(1) + i1 = stm_should_break_transaction() jump(i1) """) @@ -1245,26 +1245,26 @@ self.check_rewrite(""" [] p2 = new(descr=tdescr) - i1 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() 
jump(i1) """, """ [] p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) - i1 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() jump(i1) """) def test_double_stm_should_break_allocation(self): self.check_rewrite(""" [] - i1 = stm_should_break_transaction(0) - i2 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() + i2 = stm_should_break_transaction() jump(i1, i2) """, """ [] - i1 = stm_should_break_transaction(1) - i2 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() + i2 = stm_should_break_transaction() jump(i1, i2) """) @@ -1273,13 +1273,13 @@ [] p2 = new(descr=tdescr) label() - i1 = stm_should_break_transaction(0) + i1 = stm_should_break_transaction() jump(i1) """, """ [] p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) label() - i1 = stm_should_break_transaction(1) + i1 = stm_should_break_transaction() jump(i1) """) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -619,7 +619,7 @@ self.patch_jump_for_descr(faildescr, rawstart) if self.cpu.gc_ll_descr.stm: rstm.partial_commit_and_resume_other_threads() - + self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -940,7 +940,7 @@ mc.copy_to_raw_memory(oldadr) if self.cpu.gc_ll_descr.stm: rstm.partial_commit_and_resume_other_threads() - + def dump(self, text): if not self.verbose: @@ -2554,7 +2554,7 @@ assert isinstance(reg, RegLoc) self.mc.MOV_rr(reg.value, ebp.value) - def _generate_cmp_break_transaction(self, increase_nursery=False): + def _generate_cmp_break_transaction(self): # emits the check with a CMP instruction: # pypy_stm_nursery_low_fill_mark < STM_SEGMENT->nursery_current # so if it is followed with a JB, it will follow the jump if @@ -2562,33 +2562,26 @@ # if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" 
- nf_adr = rstm.adr_nursery_free # STM_SEGMENT->nursery_current - assert rx86.fits_in_32bits(nf_adr) # nf_adr is in page 1 - self.mc.MOV_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) - if increase_nursery: - self.mc.ADD_ri(X86_64_SCRATCH_REG.value, WORD) - self.mc.MOV_jr((self.SEGMENT_GC, nf_adr), X86_64_SCRATCH_REG.value) psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark - psnlfm_adr -= stmtlocal.threadlocal_base() - assert rx86.fits_in_32bits(psnlfm_adr) # should be %fs-local - self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_TL, psnlfm_adr)) + self.mc.MOV(X86_64_SCRATCH_REG, self.heap_tl(psnlfm_adr)) + nf_adr = rstm.adr_nursery_free + assert rx86.fits_in_32bits(nf_adr) # because it is in the 2nd page + self.mc.CMP_rj(X86_64_SCRATCH_REG.value, (self.SEGMENT_GC, nf_adr)) def genop_stm_should_break_transaction(self, op, arglocs, result_loc): - increase_nursery = op.getarg(0).getint() - self._generate_cmp_break_transaction(increase_nursery=increase_nursery) + self._generate_cmp_break_transaction() rl = result_loc.lowest8bits() - self.mc.SET_ir(rx86.Conditions['A'], rl.value) + self.mc.SET_ir(rx86.Conditions['B'], rl.value) self.mc.MOVZX8_rr(result_loc.value, rl.value) def genop_guard_stm_should_break_transaction(self, op, guard_op, guard_token, arglocs, result_loc): - increase_nursery = op.getarg(0).getint() - self._generate_cmp_break_transaction(increase_nursery=increase_nursery) + self._generate_cmp_break_transaction() if guard_op.getopnum() == rop.GUARD_FALSE: - self.implement_guard(guard_token, 'A') # JA goes to "yes, break" + self.implement_guard(guard_token, 'B') # JB goes to "yes, break" else: - self.implement_guard(guard_token, 'BE') # JBE goes to "no, don't" + self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, arglocs, result_loc): @@ -2601,9 +2594,9 @@ mc = self.mc self._generate_cmp_break_transaction() - # use JBE to jump over the following piece of 
code if we don't need + # use JAE to jump over the following piece of code if we don't need # to break the transaction now - mc.J_il(rx86.Conditions['BE'], 0xfffff) # patched later + mc.J_il(rx86.Conditions['AE'], 0xfffff) # patched later jae_location = mc.get_relative_pos() # This is the case in which we have to do the same as the logic diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -207,8 +207,7 @@ if val: # app-level loop: only one of these per loop is really needed resbox = history.BoxInt(0) - mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, - [history.CONST_FALSE], resbox) + mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) self.metainterp.heapcache.stm_break_done() return resbox else: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -494,7 +494,7 @@ 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', - 'STM_SHOULD_BREAK_TRANSACTION/1', # flag: increase nursery_current? 
+ 'STM_SHOULD_BREAK_TRANSACTION/0', 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway From noreply at buildbot.pypy.org Wed May 7 15:12:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 7 May 2014 15:12:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: do a pseudo-allocation of WORD bytes when there is no other (checks for safe-points) Message-ID: <20140507131226.2629A1C3569@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71370:cd438825f621 Date: 2014-05-07 14:06 +0200 http://bitbucket.org/pypy/pypy/changeset/cd438825f621/ Log: do a pseudo-allocation of WORD bytes when there is no other (checks for safe-points) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -1,13 +1,11 @@ from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler -from rpython.jit.backend.llsupport.descr import ( - CallDescr, FieldDescr, InteriorFieldDescr, ArrayDescr) +from rpython.jit.backend.llsupport.descr import CallDescr, FieldDescr from rpython.jit.metainterp.resoperation import ResOperation, rop -from rpython.jit.metainterp.history import BoxPtr, ConstPtr, ConstInt +from rpython.jit.metainterp.history import BoxPtr, ConstInt from rpython.rlib.objectmodel import specialize -from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, debug_print) -from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.backend.llsupport.symbolic import WORD class GcStmRewriterAssembler(GcRewriterAssembler): @@ -121,8 +119,24 @@ self.read_barrier_applied[v_ptr] = None def handle_should_break_transaction(self, op): + if not self.does_any_allocation: + # do a fake allocation since this is needed to check + # for requested 
safe-points: + self.does_any_allocation = True + self.emitting_an_operation_that_can_collect() + + size = WORD + v_result = BoxPtr() + assert self._op_malloc_nursery is None # no ongoing allocation + malloc_op = ResOperation(rop.CALL_MALLOC_NURSERY, + [ConstInt(size)], v_result) + self._op_malloc_nursery = malloc_op + self.newops.append(malloc_op) + self._previous_size = size + self._v_last_malloced_nursery = v_result + self.write_barrier_applied[v_result] = None + self.newops.append(op) - #self.does_any_allocation = True def must_apply_write_barrier(self, val, v=None): @@ -148,7 +162,6 @@ debug_print("fallback for", op.repr()) def maybe_handle_raw_accesses(self, op): - from rpython.jit.backend.llsupport.descr import FieldDescr descr = op.getdescr() assert isinstance(descr, FieldDescr) if descr.stm_dont_track_raw_accesses: diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1237,6 +1237,7 @@ jump(i1) """, """ [] + p99 = call_malloc_nursery(8) i1 = stm_should_break_transaction() jump(i1) """) @@ -1263,6 +1264,7 @@ jump(i1, i2) """, """ [] + p99 = call_malloc_nursery(8) i1 = stm_should_break_transaction() i2 = stm_should_break_transaction() jump(i1, i2) @@ -1280,6 +1282,7 @@ p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) label() + p99 = call_malloc_nursery(8) i1 = stm_should_break_transaction() jump(i1) """) From noreply at buildbot.pypy.org Wed May 7 15:12:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 7 May 2014 15:12:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Of course the minimal size has to be 16 in case we actually hit the slow-path Message-ID: <20140507131227.40A721C3569@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71371:4169f1423e3d Date: 2014-05-07 14:33 +0200 
http://bitbucket.org/pypy/pypy/changeset/4169f1423e3d/ Log: Of course the minimal size has to be 16 in case we actually hit the slow-path that asserts a size >= 16. So we can as well just use gen_malloc_nursery() diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -123,18 +123,12 @@ # do a fake allocation since this is needed to check # for requested safe-points: self.does_any_allocation = True - self.emitting_an_operation_that_can_collect() - size = WORD + # minimum size for the slowpath of MALLOC_NURSERY: + size = self.gc_ll_descr.minimal_size_in_nursery v_result = BoxPtr() assert self._op_malloc_nursery is None # no ongoing allocation - malloc_op = ResOperation(rop.CALL_MALLOC_NURSERY, - [ConstInt(size)], v_result) - self._op_malloc_nursery = malloc_op - self.newops.append(malloc_op) - self._previous_size = size - self._v_last_malloced_nursery = v_result - self.write_barrier_applied[v_result] = None + self.gen_malloc_nursery(size, v_result) self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -52,6 +52,7 @@ really_not_translated=True) self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( lambda cpu: False) # for now + self.gc_ll_descr.minimal_size_in_nursery = 16 # class FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): @@ -1237,7 +1238,7 @@ jump(i1) """, """ [] - p99 = call_malloc_nursery(8) + p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() jump(i1) """) @@ -1264,7 +1265,7 @@ jump(i1, i2) """, """ [] - p99 = call_malloc_nursery(8) + p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() i2 = stm_should_break_transaction() jump(i1, i2) @@ -1282,7 +1283,7 @@ 
p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) label() - p99 = call_malloc_nursery(8) + p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() jump(i1) """) From noreply at buildbot.pypy.org Wed May 7 15:32:18 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 7 May 2014 15:32:18 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Detect PowerPC using uname -r Message-ID: <20140507133218.3F4EA1D27BD@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71372:d9883c156e0e Date: 2014-02-20 14:00 +0000 http://bitbucket.org/pypy/pypy/changeset/d9883c156e0e/ Log: Detect PowerPC using uname -r diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -57,6 +57,7 @@ 'i86pc': MODEL_X86, # Solaris/Intel 'x86': MODEL_X86, # Apple 'Power Macintosh': MODEL_PPC_64, + 'ppc64': MODEL_PPC_64 'x86_64': MODEL_X86, 'amd64': MODEL_X86, # freebsd 'AMD64': MODEL_X86, # win64 From noreply at buildbot.pypy.org Wed May 7 15:32:19 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 7 May 2014 15:32:19 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Import old ppc backend Message-ID: <20140507133219.E1E621D27BD@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71373:2b2a0642fad9 Date: 2014-02-20 14:48 +0000 http://bitbucket.org/pypy/pypy/changeset/2b2a0642fad9/ Log: Import old ppc backend diff too long, truncating to 2000 out of 12103 lines diff --git a/pypy/jit/backend/ppc/_ppcgen.c b/pypy/jit/backend/ppc/_ppcgen.c new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/_ppcgen.c @@ -0,0 +1,154 @@ +#include +#include + +#define __dcbf(base, index) \ + __asm__ ("dcbf %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") + + +static PyTypeObject* mmap_type; + +#if defined(__APPLE__) + 
+#include + +static PyObject* +_ppy_NSLookupAndBindSymbol(PyObject* self, PyObject* args) +{ + char *s; + NSSymbol sym; + + if (!PyArg_ParseTuple(args, "s", &s)) + return NULL; + + if (!NSIsSymbolNameDefined(s)) { + return PyErr_Format(PyExc_ValueError, + "symbol '%s' not found", s); + } + + sym = NSLookupAndBindSymbol(s); + + return PyInt_FromLong((long)NSAddressOfSymbol(sym)); +} + + +#elif defined(linux) + +#include + +static PyObject* +_ppy_dlsym(PyObject* self, PyObject* args) +{ + char *s; + void *handle; + void *sym; + + if (!PyArg_ParseTuple(args, "s", &s)) + return NULL; + + handle = dlopen(RTLD_DEFAULT, RTLD_LAZY); + sym = dlsym(handle, s); + if (sym == NULL) { + return PyErr_Format(PyExc_ValueError, + "symbol '%s' not found", s); + } + return PyInt_FromLong((long)sym); +} + +#else + +#error "OS not supported" + +#endif + + +static PyObject* +_ppy_mmap_exec(PyObject* self, PyObject* args) +{ + PyObject* code_args; + PyObject* r; + PyObject* mmap_obj; + char* code; + size_t size; + + if (!PyArg_ParseTuple(args, "O!O!:mmap_exec", + mmap_type, &mmap_obj, + &PyTuple_Type, &code_args)) + return NULL; + + code = *((char**)mmap_obj + 2); + size = *((size_t*)mmap_obj + 3); + + r = ((PyCFunction)code)(NULL, code_args); + + Py_DECREF(args); + + return r; +} + +static PyObject* +_ppy_mmap_flush(PyObject* self, PyObject* arg) +{ + char* code; + size_t size; + int i = 0; + + if (!PyObject_TypeCheck(arg, mmap_type)) { + PyErr_SetString(PyExc_TypeError, + "mmap_flush: single argument must be mmap object"); + } + + code = *((char**)arg + 2); + size = *((size_t*)arg + 3); + + for (; i < size; i += 32){ + __dcbf(code, i); + } + + Py_INCREF(Py_None); + return Py_None; +} + + +PyMethodDef _ppy_methods[] = { +#if defined(__APPLE__) + {"NSLookupAndBindSymbol", _ppy_NSLookupAndBindSymbol, + METH_VARARGS, ""}, +#elif defined(linux) + {"dlsym", _ppy_dlsym, METH_VARARGS, ""}, +#endif + {"mmap_exec", _ppy_mmap_exec, METH_VARARGS, ""}, + {"mmap_flush", _ppy_mmap_flush, METH_O, ""}, 
+ {0, 0} +}; + +#if !defined(MAP_ANON) && defined(__APPLE__) +#define MAP_ANON 0x1000 +#endif + +PyMODINIT_FUNC +init_ppcgen(void) +{ + PyObject* m; + PyObject* mmap_module; + PyObject* mmap_func; + PyObject* mmap_obj; + + m = Py_InitModule("_ppcgen", _ppy_methods); + + /* argh */ + /* time to campaign for a C API for the mmap module! */ + mmap_module = PyImport_ImportModule("mmap"); + if (!mmap_module) + return; + mmap_func = PyObject_GetAttrString(mmap_module, "mmap"); + if (!mmap_func) + return; + mmap_obj = PyEval_CallFunction(mmap_func, "iii", -1, 0, MAP_ANON); + if (!mmap_obj) + return; + mmap_type = mmap_obj->ob_type; + Py_INCREF(mmap_type); + Py_DECREF(mmap_obj); + Py_DECREF(mmap_func); + Py_DECREF(mmap_module); +} diff --git a/pypy/jit/backend/ppc/arch.py b/pypy/jit/backend/ppc/arch.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/arch.py @@ -0,0 +1,40 @@ +# Constants that depend on whether we are on 32-bit or 64-bit + +from pypy.jit.backend.ppc.register import (NONVOLATILES, + NONVOLATILES_FLOAT, + MANAGED_REGS, + MANAGED_FP_REGS) + +import sys +if sys.maxint == (2**31 - 1): + WORD = 4 + DWORD = 2 * WORD + IS_PPC_32 = True + BACKCHAIN_SIZE = 2 + FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD +else: + WORD = 8 + DWORD = 2 * WORD + IS_PPC_32 = False + BACKCHAIN_SIZE = 6 + FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * WORD + +IS_PPC_64 = not IS_PPC_32 +MY_COPY_OF_REGS = 0 + +FORCE_INDEX = WORD +GPR_SAVE_AREA = len(NONVOLATILES) * WORD +FLOAT_INT_CONVERSION = WORD +MAX_REG_PARAMS = 8 +MAX_FREG_PARAMS = 13 +# we need at most 5 instructions to load a constant +# and one instruction to patch the stack pointer +SIZE_LOAD_IMM_PATCH_SP = 6 + +FORCE_INDEX_OFS = (len(MANAGED_REGS) + len(MANAGED_FP_REGS)) * WORD + +# offset to LR in BACKCHAIN +if IS_PPC_32: + LR_BC_OFFSET = WORD +else: + LR_BC_OFFSET = 2 * WORD diff --git a/pypy/jit/backend/ppc/asmfunc.py b/pypy/jit/backend/ppc/asmfunc.py new file mode 100644 --- /dev/null +++ 
b/pypy/jit/backend/ppc/asmfunc.py @@ -0,0 +1,39 @@ +import py +import mmap, struct + +from pypy.jit.backend.ppc.codebuf import MachineCodeBlockWrapper +from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager +from pypy.rpython.lltypesystem import lltype, rffi +from pypy.jit.backend.ppc.arch import IS_PPC_32, IS_PPC_64, WORD +from pypy.rlib.rarithmetic import r_uint + +_ppcgen = None + +def get_ppcgen(): + global _ppcgen + if _ppcgen is None: + _ppcgen = py.magic.autopath().dirpath().join('_ppcgen.c')._getpymodule() + return _ppcgen + +class AsmCode(object): + def __init__(self, size): + self.code = MachineCodeBlockWrapper() + if IS_PPC_64: + # allocate function descriptor - 3 doublewords + for i in range(6): + self.emit(r_uint(0)) + + def emit(self, word): + self.code.writechar(chr((word >> 24) & 0xFF)) + self.code.writechar(chr((word >> 16) & 0xFF)) + self.code.writechar(chr((word >> 8) & 0xFF)) + self.code.writechar(chr(word & 0xFF)) + + def get_function(self): + i = self.code.materialize(AsmMemoryManager(), []) + if IS_PPC_64: + p = rffi.cast(rffi.CArrayPtr(lltype.Signed), i) + p[0] = i + 3*WORD + # p[1], p[2] = ?? + t = lltype.FuncType([], lltype.Signed) + return rffi.cast(lltype.Ptr(t), i) diff --git a/pypy/jit/backend/ppc/assembler.py b/pypy/jit/backend/ppc/assembler.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/assembler.py @@ -0,0 +1,95 @@ +import os +from pypy.jit.backend.ppc import form +from pypy.jit.backend.ppc import asmfunc + +# don't be fooled by the fact that there's some separation between a +# generic assembler class and a PPC assembler class... there's +# certainly a RISC dependency in here, and quite possibly a PPC +# dependency or two too. 
I personally don't care :) + +class AssemblerException(Exception): + pass + +class Assembler(object): + def __init__(self): + self.insts = [] + self.labels = {} + self.rlabels = {} + + def reset(self): + self.insts = [] + self.labels = {} + self.rlabels = {} + + def label(self, name): + if name in self.labels: + raise AssemblerException, "duplicate label '%s'"%(name,) + self.labels[name] = len(self.insts)*4 + self.rlabels.setdefault(len(self.insts)*4, []).append(name) + + def labelname(self, base="L"): + i = 0 + while 1: + ln = base + str(i) + if ln not in self.labels: + return ln + i += 1 + + def get_number_of_ops(self): + return len(self.insts) + + # XXX don't need multiplication + def get_rel_pos(self): + return 4 * len(self.insts) + + def patch_op(self, index): + last = self.insts.pop() + self.insts[index] = last + + def assemble0(self, dump=os.environ.has_key('PPY_DEBUG')): + for i, inst in enumerate(self.insts): + for f in inst.lfields: + l = self.labels[inst.fields[f]] - 4*i + inst.fields[f] = l + buf = [] + for inst in self.insts: + buf.append(inst) + if dump: + for i in range(len(buf)): + inst = self.disassemble(buf[i], self.rlabels, i*4) + for lab in self.rlabels.get(4*i, []): + print "%s:"%(lab,) + print "\t%4d %s"%(4*i, inst) + return buf + + def assemble(self, dump=os.environ.has_key('PPY_DEBUG')): + c = asmfunc.AsmCode(len(self.insts)*4) + for i in self.insts: + c.emit(i) + + def get_assembler_function(self): + c = asmfunc.AsmCode(len(self.insts)*4) + for i in self.insts: + c.emit(i) + return c.get_function() + + def get_idescs(cls): + r = [] + for name in dir(cls): + a = getattr(cls, name) + if isinstance(a, form.IDesc): + r.append((name, a)) + return r + get_idescs = classmethod(get_idescs) + + def disassemble(cls, inst, labels={}, pc=0): + matches = [] + idescs = cls.get_idescs() + for name, idesc in idescs: + m = idesc.match(inst) + if m > 0: + matches.append((m, idesc, name)) + if matches: + score, idesc, name = max(matches) + return 
idesc.disassemble(name, inst, labels, pc) + disassemble = classmethod(disassemble) diff --git a/pypy/jit/backend/ppc/codebuilder.py b/pypy/jit/backend/ppc/codebuilder.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/codebuilder.py @@ -0,0 +1,1269 @@ +import os +from pypy.jit.backend.ppc.ppc_form import PPCForm as Form +from pypy.jit.backend.ppc.locations import RegisterLocation +from pypy.jit.backend.ppc.ppc_field import ppc_fields +from pypy.jit.backend.ppc.assembler import Assembler +from pypy.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64, + LR_BC_OFFSET) +import pypy.jit.backend.ppc.register as r +from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from pypy.rpython.lltypesystem import lltype, rffi +from pypy.jit.metainterp.resoperation import rop +from pypy.tool.udir import udir +from pypy.rlib.objectmodel import we_are_translated + +from pypy.translator.tool.cbuild import ExternalCompilationInfo +from pypy.jit.backend.ppc.rassemblermaker import make_rassembler + +A = Form("frD", "frA", "frB", "XO3", "Rc") +A1 = Form("frD", "frB", "XO3", "Rc") +A2 = Form("frD", "frA", "frC", "XO3", "Rc") +A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") + +I = Form("LI", "AA", "LK") + +B = Form("BO", "BI", "BD", "AA", "LK") + +SC = Form("AA") # fudge + +DD = Form("rD", "rA", "SIMM") +DDO = Form("rD", "rA", "ds", "XO4") +DS = Form("rA", "rS", "UIMM") + +X = Form("XO1") +XS = Form("rA", "rS", "rB", "XO1", "Rc") +XSO = Form("rS", "rA", "rB", "XO1") +XD = Form("rD", "rA", "rB", "XO1") +XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") +XO0 = Form("rD", "rA", "OE", "XO2", "Rc") +XDB = Form("frD", "frB", "XO1", "Rc") +XS0 = Form("rA", "rS", "XO1", "Rc") +X0 = Form("rA", "rB", "XO1") +XcAB = Form("crfD", "rA", "rB", "XO1") +XN = Form("rD", "rA", "NB", "XO1") +XL = Form("crbD", "crbA", "crbB", "XO1") +XL1 = Form("crfD", "crfS") +XL2 = Form("crbD", "XO1", "Rc") +XFL = Form("FM", "frB", "XO1", "Rc") +XFX = Form("CRM", "rS", "XO1") + +MI = 
Form("rA", "rS", "SH", "MB", "ME", "Rc") +MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") +MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") +MDS = Form("rA", "rS", "rB", "mbe", "XO7", "Rc") + +class BasicPPCAssembler(Assembler): + + def disassemble(cls, inst, labels={}, pc=0): + cache = cls.__dict__.get('idesc cache') + if cache is None: + idescs = cls.get_idescs() + cache = {} + for n, i in idescs: + cache.setdefault(i.specializations[ppc_fields['opcode']], + []).append((n,i)) + setattr(cls, 'idesc cache', cache) + matches = [] + idescs = cache[ppc_fields['opcode'].decode(inst)] + for name, idesc in idescs: + m = idesc.match(inst) + if m > 0: + matches.append((m, idesc, name)) + if matches: + score, idesc, name = max(matches) + return idesc.disassemble(name, inst, labels, pc) + disassemble = classmethod(disassemble) + + # "basic" means no simplified mnemonics + + # I form + b = I(18, AA=0, LK=0) + ba = I(18, AA=1, LK=0) + bl = I(18, AA=0, LK=1) + bla = I(18, AA=1, LK=1) + + # B form + bc = B(16, AA=0, LK=0) + bcl = B(16, AA=0, LK=1) + bca = B(16, AA=1, LK=0) + bcla = B(16, AA=1, LK=1) + + # SC form + sc = SC(17, AA=1) # it's not really the aa field... 
+ + # D form + addi = DD(14) + addic = DD(12) + addicx = DD(13) + addis = DD(15) + + andix = DS(28) + andisx = DS(29) + + cmpi = Form("crfD", "L", "rA", "SIMM")(11) + cmpi.default(L=0).default(crfD=0) + cmpli = Form("crfD", "L", "rA", "UIMM")(10) + cmpli.default(L=0).default(crfD=0) + + lbz = DD(34) + lbzu = DD(35) + ld = DDO(58, XO4=0) + ldu = DDO(58, XO4=1) + lfd = DD(50) + lfdu = DD(51) + lfs = DD(48) + lfsu = DD(49) + lha = DD(42) + lhau = DD(43) + lhz = DD(40) + lhzu = DD(41) + lmw = DD(46) + lwa = DDO(58, XO4=2) + lwz = DD(32) + lwzu = DD(33) + + mulli = DD(7) + ori = DS(24) + oris = DS(25) + + stb = DD(38) + stbu = DD(39) + std = DDO(62, XO4=0) + stdu = DDO(62, XO4=1) + stfd = DD(54) + stfdu = DD(55) + stfs = DD(52) + stfsu = DD(53) + sth = DD(44) + sthu = DD(45) + stmw = DD(47) + stw = DD(36) + stwu = DD(37) + + subfic = DD(8) + tdi = Form("TO", "rA", "SIMM")(2) + twi = Form("TO", "rA", "SIMM")(3) + xori = DS(26) + xoris = DS(27) + + # X form + + and_ = XS(31, XO1=28, Rc=0) + and_x = XS(31, XO1=28, Rc=1) + + andc_ = XS(31, XO1=60, Rc=0) + andc_x = XS(31, XO1=60, Rc=1) + + # is the L bit for 64 bit compares? 
hmm + cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) + cmp.default(L=0).default(crfD=0) + cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) + cmpl.default(L=0).default(crfD=0) + + cntlzd = XS0(31, XO1=58, Rc=0) + cntlzdx = XS0(31, XO1=58, Rc=1) + cntlzw = XS0(31, XO1=26, Rc=0) + cntlzwx = XS0(31, XO1=26, Rc=1) + + dcba = X0(31, XO1=758) + dcbf = X0(31, XO1=86) + dcbi = X0(31, XO1=470) + dcbst = X0(31, XO1=54) + dcbt = X0(31, XO1=278) + dcbtst = X0(31, XO1=246) + dcbz = X0(31, XO1=1014) + + eciwx = XD(31, XO1=310) + ecowx = XS(31, XO1=438, Rc=0) + + eieio = X(31, XO1=854) + + eqv = XS(31, XO1=284, Rc=0) + eqvx = XS(31, XO1=284, Rc=1) + + extsb = XS0(31, XO1=954, Rc=0) + extsbx = XS0(31, XO1=954, Rc=1) + + extsh = XS0(31, XO1=922, Rc=0) + extshx = XS0(31, XO1=922, Rc=1) + + extsw = XS0(31, XO1=986, Rc=0) + extswx = XS0(31, XO1=986, Rc=1) + + fabs = XDB(63, XO1=264, Rc=0) + fabsx = XDB(63, XO1=264, Rc=1) + + fcmpo = XcAB(63, XO1=32) + fcmpu = XcAB(63, XO1=0) + + fcfid = XDB(63, XO1=846, Rc=0) + fcfidx = XDB(63, XO1=846, Rc=1) + + fctid = XDB(63, XO1=814, Rc=0) + fctidx = XDB(63, XO1=814, Rc=1) + + fctidz = XDB(63, XO1=815, Rc=0) + fctidzx = XDB(63, XO1=815, Rc=1) + + fctiw = XDB(63, XO1=14, Rc=0) + fctiwx = XDB(63, XO1=14, Rc=1) + + fctiwz = XDB(63, XO1=15, Rc=0) + fctiwzx = XDB(63, XO1=15, Rc=1) + + fmr = XDB(63, XO1=72, Rc=0) + fmrx = XDB(63, XO1=72, Rc=1) + + fnabs = XDB(63, XO1=136, Rc=0) + fnabsx = XDB(63, XO1=136, Rc=1) + + fneg = XDB(63, XO1=40, Rc=0) + fnegx = XDB(63, XO1=40, Rc=1) + + frsp = XDB(63, XO1=12, Rc=0) + frspx = XDB(63, XO1=12, Rc=1) + + fsqrt = XDB(63, XO1=22, Rc=0) + + mffgpr = XS(31, XO1=607, Rc=0) + mftgpr = XS(31, XO1=735, Rc=0) + + icbi = X0(31, XO1=982) + + lbzux = XD(31, XO1=119) + lbzx = XD(31, XO1=87) + ldarx = XD(31, XO1=84) + ldux = XD(31, XO1=53) + ldx = XD(31, XO1=21) + lfdux = XD(31, XO1=631) + lfdx = XD(31, XO1=599) + lfsux = XD(31, XO1=567) + lfsx = XD(31, XO1=535) + lhaux = XD(31, XO1=375) + lhax = XD(31, XO1=343) + 
lhbrx = XD(31, XO1=790) + lhzux = XD(31, XO1=311) + lhzx = XD(31, XO1=279) + lswi = XD(31, XO1=597) + lswx = XD(31, XO1=533) + lwarx = XD(31, XO1=20) + lwaux = XD(31, XO1=373) + lwax = XD(31, XO1=341) + lwbrx = XD(31, XO1=534) + lwzux = XD(31, XO1=55) + lwzx = XD(31, XO1=23) + + mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) + mcrxr = Form("crfD", "XO1")(31, XO1=512) + mfcr = Form("rD", "XO1")(31, XO1=19) + mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) + mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) + mfmsr = Form("rD", "XO1")(31, XO1=83) + mfsr = Form("rD", "SR", "XO1")(31, XO1=595) + mfsrin = XDB(31, XO1=659, Rc=0) + + add = XO(31, XO2=266, OE=0, Rc=0) + addx = XO(31, XO2=266, OE=0, Rc=1) + addo = XO(31, XO2=266, OE=1, Rc=0) + addox = XO(31, XO2=266, OE=1, Rc=1) + + addc = XO(31, XO2=10, OE=0, Rc=0) + addcx = XO(31, XO2=10, OE=0, Rc=1) + addco = XO(31, XO2=10, OE=1, Rc=0) + addcox = XO(31, XO2=10, OE=1, Rc=1) + + adde = XO(31, XO2=138, OE=0, Rc=0) + addex = XO(31, XO2=138, OE=0, Rc=1) + addeo = XO(31, XO2=138, OE=1, Rc=0) + addeox = XO(31, XO2=138, OE=1, Rc=1) + + addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) + addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) + addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) + addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) + + addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) + addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) + addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) + addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) + + bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) + bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) + + bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) + bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) + + crand = XL(19, XO1=257) + crandc = XL(19, XO1=129) + creqv = XL(19, XO1=289) + crnand = XL(19, XO1=225) + crnor = XL(19, XO1=33) + cror = XL(19, XO1=449) + crorc = XL(19, XO1=417) + crxor = XL(19, XO1=193) + + divd = XO(31, XO2=489, OE=0, Rc=0) + divdx = XO(31, XO2=489, OE=0, Rc=1) + divdo = XO(31, 
XO2=489, OE=1, Rc=0) + divdox = XO(31, XO2=489, OE=1, Rc=1) + + divdu = XO(31, XO2=457, OE=0, Rc=0) + divdux = XO(31, XO2=457, OE=0, Rc=1) + divduo = XO(31, XO2=457, OE=1, Rc=0) + divduox = XO(31, XO2=457, OE=1, Rc=1) + + divw = XO(31, XO2=491, OE=0, Rc=0) + divwx = XO(31, XO2=491, OE=0, Rc=1) + divwo = XO(31, XO2=491, OE=1, Rc=0) + divwox = XO(31, XO2=491, OE=1, Rc=1) + + divwu = XO(31, XO2=459, OE=0, Rc=0) + divwux = XO(31, XO2=459, OE=0, Rc=1) + divwuo = XO(31, XO2=459, OE=1, Rc=0) + divwuox = XO(31, XO2=459, OE=1, Rc=1) + + fadd = A(63, XO3=21, Rc=0) + faddx = A(63, XO3=21, Rc=1) + fadds = A(59, XO3=21, Rc=0) + faddsx = A(59, XO3=21, Rc=1) + + fdiv = A(63, XO3=18, Rc=0) + fdivx = A(63, XO3=18, Rc=1) + fdivs = A(59, XO3=18, Rc=0) + fdivsx = A(59, XO3=18, Rc=1) + + fmadd = A3(63, XO3=19, Rc=0) + fmaddx = A3(63, XO3=19, Rc=1) + fmadds = A3(59, XO3=19, Rc=0) + fmaddsx = A3(59, XO3=19, Rc=1) + + fmsub = A3(63, XO3=28, Rc=0) + fmsubx = A3(63, XO3=28, Rc=1) + fmsubs = A3(59, XO3=28, Rc=0) + fmsubsx = A3(59, XO3=28, Rc=1) + + fmul = A2(63, XO3=25, Rc=0) + fmulx = A2(63, XO3=25, Rc=1) + fmuls = A2(59, XO3=25, Rc=0) + fmulsx = A2(59, XO3=25, Rc=1) + + fnmadd = A3(63, XO3=31, Rc=0) + fnmaddx = A3(63, XO3=31, Rc=1) + fnmadds = A3(59, XO3=31, Rc=0) + fnmaddsx = A3(59, XO3=31, Rc=1) + + fnmsub = A3(63, XO3=30, Rc=0) + fnmsubx = A3(63, XO3=30, Rc=1) + fnmsubs = A3(59, XO3=30, Rc=0) + fnmsubsx = A3(59, XO3=30, Rc=1) + + fres = A1(59, XO3=24, Rc=0) + fresx = A1(59, XO3=24, Rc=1) + + frsp = A1(63, XO3=12, Rc=0) + frspx = A1(63, XO3=12, Rc=1) + + frsqrte = A1(63, XO3=26, Rc=0) + frsqrtex = A1(63, XO3=26, Rc=1) + + fsel = A3(63, XO3=23, Rc=0) + fselx = A3(63, XO3=23, Rc=1) + + frsqrt = A1(63, XO3=22, Rc=0) + frsqrtx = A1(63, XO3=22, Rc=1) + frsqrts = A1(59, XO3=22, Rc=0) + frsqrtsx = A1(59, XO3=22, Rc=1) + + fsub = A(63, XO3=20, Rc=0) + fsubx = A(63, XO3=20, Rc=1) + fsubs = A(59, XO3=20, Rc=0) + fsubsx = A(59, XO3=20, Rc=1) + + isync = X(19, XO1=150) + + mcrf = XL1(19) + + mfspr = 
Form("rD", "spr", "XO1")(31, XO1=339) + mftb = Form("rD", "spr", "XO1")(31, XO1=371) + + mtcrf = XFX(31, XO1=144) + + mtfsb0 = XL2(63, XO1=70, Rc=0) + mtfsb0x = XL2(63, XO1=70, Rc=1) + mtfsb1 = XL2(63, XO1=38, Rc=0) + mtfsb1x = XL2(63, XO1=38, Rc=1) + + mtfsf = XFL(63, XO1=711, Rc=0) + mtfsfx = XFL(63, XO1=711, Rc=1) + + mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) + mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) + + mtmsr = Form("rS", "XO1")(31, XO1=146) + + mtspr = Form("rS", "spr", "XO1")(31, XO1=467) + + mtsr = Form("rS", "SR", "XO1")(31, XO1=210) + mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) + + mulhd = XO(31, OE=0, XO2=73, Rc=0) + mulhdx = XO(31, OE=0, XO2=73, Rc=1) + + mulhdu = XO(31, OE=0, XO2=9, Rc=0) + mulhdux = XO(31, OE=0, XO2=9, Rc=1) + + mulld = XO(31, OE=0, XO2=233, Rc=0) + mulldx = XO(31, OE=0, XO2=233, Rc=1) + mulldo = XO(31, OE=1, XO2=233, Rc=0) + mulldox = XO(31, OE=1, XO2=233, Rc=1) + + mulhw = XO(31, OE=0, XO2=75, Rc=0) + mulhwx = XO(31, OE=0, XO2=75, Rc=1) + + mulhwu = XO(31, OE=0, XO2=11, Rc=0) + mulhwux = XO(31, OE=0, XO2=11, Rc=1) + + mullw = XO(31, OE=0, XO2=235, Rc=0) + mullwx = XO(31, OE=0, XO2=235, Rc=1) + mullwo = XO(31, OE=1, XO2=235, Rc=0) + mullwox = XO(31, OE=1, XO2=235, Rc=1) + + nand = XS(31, XO1=476, Rc=0) + nandx = XS(31, XO1=476, Rc=1) + + neg = XO0(31, OE=0, XO2=104, Rc=0) + negx = XO0(31, OE=0, XO2=104, Rc=1) + nego = XO0(31, OE=1, XO2=104, Rc=0) + negox = XO0(31, OE=1, XO2=104, Rc=1) + + nor = XS(31, XO1=124, Rc=0) + norx = XS(31, XO1=124, Rc=1) + + or_ = XS(31, XO1=444, Rc=0) + or_x = XS(31, XO1=444, Rc=1) + + orc = XS(31, XO1=412, Rc=0) + orcx = XS(31, XO1=412, Rc=1) + + rfi = X(19, XO1=50) + + rfid = X(19, XO1=18) + + rldcl = MDS(30, XO7=8, Rc=0) + rldclx = MDS(30, XO7=8, Rc=1) + rldcr = MDS(30, XO7=9, Rc=0) + rldcrx = MDS(30, XO7=9, Rc=1) + + rldic = MDI(30, XO5=2, Rc=0) + rldicx = MDI(30, XO5=2, Rc=1) + rldicl = MDI(30, XO5=0, Rc=0) + rldiclx = MDI(30, XO5=0, Rc=1) + rldicr = MDI(30, 
XO5=1, Rc=0) + rldicrx = MDI(30, XO5=1, Rc=1) + rldimi = MDI(30, XO5=3, Rc=0) + rldimix = MDI(30, XO5=3, Rc=1) + + rlwimi = MI(20, Rc=0) + rlwimix = MI(20, Rc=1) + + rlwinm = MI(21, Rc=0) + rlwinmx = MI(21, Rc=1) + + rlwnm = MB(23, Rc=0) + rlwnmx = MB(23, Rc=1) + + sld = XS(31, XO1=27, Rc=0) + sldx = XS(31, XO1=27, Rc=1) + + slw = XS(31, XO1=24, Rc=0) + slwx = XS(31, XO1=24, Rc=1) + + srad = XS(31, XO1=794, Rc=0) + sradx = XS(31, XO1=794, Rc=1) + + sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) + sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) + + sraw = XS(31, XO1=792, Rc=0) + srawx = XS(31, XO1=792, Rc=1) + + srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) + srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) + + srd = XS(31, XO1=539, Rc=0) + srdx = XS(31, XO1=539, Rc=1) + + srw = XS(31, XO1=536, Rc=0) + srwx = XS(31, XO1=536, Rc=1) + + stbux = XSO(31, XO1=247) + stbx = XSO(31, XO1=215) + stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) + stdux = XSO(31, XO1=181) + stdx = XSO(31, XO1=149) + stfdux = XSO(31, XO1=759) + stfdx = XSO(31, XO1=727) + stfiwx = XSO(31, XO1=983) + stfsux = XSO(31, XO1=695) + stfsx = XSO(31, XO1=663) + sthbrx = XSO(31, XO1=918) + sthux = XSO(31, XO1=439) + sthx = XSO(31, XO1=407) + stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) + stswx = XSO(31, XO1=661) + stwbrx = XSO(31, XO1=662) + stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) + stwux = XSO(31, XO1=183) + stwx = XSO(31, XO1=151) + + subf = XO(31, XO2=40, OE=0, Rc=0) + subfx = XO(31, XO2=40, OE=0, Rc=1) + subfo = XO(31, XO2=40, OE=1, Rc=0) + subfox = XO(31, XO2=40, OE=1, Rc=1) + + subfc = XO(31, XO2=8, OE=0, Rc=0) + subfcx = XO(31, XO2=8, OE=0, Rc=1) + subfco = XO(31, XO2=8, OE=1, Rc=0) + subfcox = XO(31, XO2=8, OE=1, Rc=1) + + subfe = XO(31, XO2=136, OE=0, Rc=0) + subfex = XO(31, XO2=136, OE=0, Rc=1) + subfeo = XO(31, XO2=136, OE=1, Rc=0) + subfeox = XO(31, XO2=136, OE=1, Rc=1) + + 
subfme = XO0(31, OE=0, XO2=232, Rc=0) + subfmex = XO0(31, OE=0, XO2=232, Rc=1) + subfmeo = XO0(31, OE=1, XO2=232, Rc=0) + subfmeox= XO0(31, OE=1, XO2=232, Rc=1) + + subfze = XO0(31, OE=0, XO2=200, Rc=0) + subfzex = XO0(31, OE=0, XO2=200, Rc=1) + subfzeo = XO0(31, OE=1, XO2=200, Rc=0) + subfzeox= XO0(31, OE=1, XO2=200, Rc=1) + + sync = X(31, XO1=598) + + tlbia = X(31, XO1=370) + tlbie = Form("rB", "XO1")(31, XO1=306) + tlbsync = X(31, XO1=566) + + td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) + tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) + + xor = XS(31, XO1=316, Rc=0) + xorx = XS(31, XO1=316, Rc=1) + +class PPCAssembler(BasicPPCAssembler): + BA = BasicPPCAssembler + + # awkward mnemonics: + # mftb + # most of the branch mnemonics... + + # F.2 Simplified Mnemonics for Subtract Instructions + + def subi(self, rD, rA, value): + self.addi(rD, rA, -value) + def subis(self, rD, rA, value): + self.addis(rD, rA, -value) + def subic(self, rD, rA, value): + self.addic(rD, rA, -value) + def subicx(self, rD, rA, value): + self.addicx(rD, rA, -value) + + def sub(self, rD, rA, rB): + self.subf(rD, rB, rA) + def subc(self, rD, rA, rB): + self.subfc(rD, rB, rA) + def subx(self, rD, rA, rB): + self.subfx(rD, rB, rA) + def subcx(self, rD, rA, rB): + self.subfcx(rD, rB, rA) + def subo(self, rD, rA, rB): + self.subfo(rD, rB, rA) + def subco(self, rD, rA, rB): + self.subfco(rD, rB, rA) + def subox(self, rD, rA, rB): + self.subfox(rD, rB, rA) + def subcox(self, rD, rA, rB): + self.subfcox(rD, rB, rA) + + # F.3 Simplified Mnemonics for Compare Instructions + + cmpdi = BA.cmpi(L=1) + cmpwi = BA.cmpi(L=0) + cmpldi = BA.cmpli(L=1) + cmplwi = BA.cmpli(L=0) + cmpd = BA.cmp(L=1) + cmpw = BA.cmp(L=0) + cmpld = BA.cmpl(L=1) + cmplw = BA.cmpl(L=0) + + # F.4 Simplified Mnemonics for Rotate and Shift Instructions + + def extlwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b, 0, n-1) + + def extrwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b+n, 32-n, 31) + + def inslwi(self, rA, rS, n, b): + 
self.rwlimi(rA, rS, 32-b, b, b + n -1) + + def insrwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) + + def rotlwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31) + + def rotrwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, 0, 31) + + def rotlw(self, rA, rS, rB): + self.rlwnm(rA, rS, rB, 0, 31) + + def slwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31-n) + + def srwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, n, 31) + + def sldi(self, rA, rS, n): + self.rldicr(rA, rS, n, 63-n) + + def srdi(self, rA, rS, n): + self.rldicl(rA, rS, 64-n, n) + + # F.5 Simplified Mnemonics for Branch Instructions + + # there's a lot of these! + bt = BA.bc(BO=12) + bf = BA.bc(BO=4) + bdnz = BA.bc(BO=16, BI=0) + bdnzt = BA.bc(BO=8) + bdnzf = BA.bc(BO=0) + bdz = BA.bc(BO=18) + bdzt = BA.bc(BO=10) + bdzf = BA.bc(BO=2) + + bta = BA.bca(BO=12) + bfa = BA.bca(BO=4) + bdnza = BA.bca(BO=16, BI=0) + bdnzta = BA.bca(BO=8) + bdnzfa = BA.bca(BO=0) + bdza = BA.bca(BO=18) + bdzta = BA.bca(BO=10) + bdzfa = BA.bca(BO=2) + + btl = BA.bcl(BO=12) + bfl = BA.bcl(BO=4) + bdnzl = BA.bcl(BO=16, BI=0) + bdnztl = BA.bcl(BO=8) + bdnzfl = BA.bcl(BO=0) + bdzl = BA.bcl(BO=18) + bdztl = BA.bcl(BO=10) + bdzfl = BA.bcl(BO=2) + + btla = BA.bcla(BO=12) + bfla = BA.bcla(BO=4) + bdnzla = BA.bcla(BO=16, BI=0) + bdnztla = BA.bcla(BO=8) + bdnzfla = BA.bcla(BO=0) + bdzla = BA.bcla(BO=18) + bdztla = BA.bcla(BO=10) + bdzfla = BA.bcla(BO=2) + + blr = BA.bclr(BO=20, BI=0) + btlr = BA.bclr(BO=12) + bflr = BA.bclr(BO=4) + bdnzlr = BA.bclr(BO=16, BI=0) + bdnztlr = BA.bclr(BO=8) + bdnzflr = BA.bclr(BO=0) + bdzlr = BA.bclr(BO=18, BI=0) + bdztlr = BA.bclr(BO=10) + bdzflr = BA.bclr(BO=2) + + bctr = BA.bcctr(BO=20, BI=0) + btctr = BA.bcctr(BO=12) + bfctr = BA.bcctr(BO=4) + + blrl = BA.bclrl(BO=20, BI=0) + btlrl = BA.bclrl(BO=12) + bflrl = BA.bclrl(BO=4) + bdnzlrl = BA.bclrl(BO=16, BI=0) + bdnztlrl = BA.bclrl(BO=8) + bdnzflrl = BA.bclrl(BO=0) + bdzlrl = BA.bclrl(BO=18, BI=0) + bdztlrl = BA.bclrl(BO=10) + 
bdzflrl = BA.bclrl(BO=2) + + bctrl = BA.bcctrl(BO=20, BI=0) + btctrl = BA.bcctrl(BO=12) + bfctrl = BA.bcctrl(BO=4) + + # these should/could take a[n optional] crf argument, but it's a + # bit hard to see how to arrange that. + + blt = BA.bc(BO=12, BI=0) + ble = BA.bc(BO=4, BI=1) + beq = BA.bc(BO=12, BI=2) + bge = BA.bc(BO=4, BI=0) + bgt = BA.bc(BO=12, BI=1) + bnl = BA.bc(BO=4, BI=0) + bne = BA.bc(BO=4, BI=2) + bng = BA.bc(BO=4, BI=1) + bso = BA.bc(BO=12, BI=3) + bns = BA.bc(BO=4, BI=3) + bun = BA.bc(BO=12, BI=3) + bnu = BA.bc(BO=4, BI=3) + + blta = BA.bca(BO=12, BI=0) + blea = BA.bca(BO=4, BI=1) + beqa = BA.bca(BO=12, BI=2) + bgea = BA.bca(BO=4, BI=0) + bgta = BA.bca(BO=12, BI=1) + bnla = BA.bca(BO=4, BI=0) + bnea = BA.bca(BO=4, BI=2) + bnga = BA.bca(BO=4, BI=1) + bsoa = BA.bca(BO=12, BI=3) + bnsa = BA.bca(BO=4, BI=3) + buna = BA.bca(BO=12, BI=3) + bnua = BA.bca(BO=4, BI=3) + + bltl = BA.bcl(BO=12, BI=0) + blel = BA.bcl(BO=4, BI=1) + beql = BA.bcl(BO=12, BI=2) + bgel = BA.bcl(BO=4, BI=0) + bgtl = BA.bcl(BO=12, BI=1) + bnll = BA.bcl(BO=4, BI=0) + bnel = BA.bcl(BO=4, BI=2) + bngl = BA.bcl(BO=4, BI=1) + bsol = BA.bcl(BO=12, BI=3) + bnsl = BA.bcl(BO=4, BI=3) + bunl = BA.bcl(BO=12, BI=3) + bnul = BA.bcl(BO=4, BI=3) + + bltla = BA.bcla(BO=12, BI=0) + blela = BA.bcla(BO=4, BI=1) + beqla = BA.bcla(BO=12, BI=2) + bgela = BA.bcla(BO=4, BI=0) + bgtla = BA.bcla(BO=12, BI=1) + bnlla = BA.bcla(BO=4, BI=0) + bnela = BA.bcla(BO=4, BI=2) + bngla = BA.bcla(BO=4, BI=1) + bsola = BA.bcla(BO=12, BI=3) + bnsla = BA.bcla(BO=4, BI=3) + bunla = BA.bcla(BO=12, BI=3) + bnula = BA.bcla(BO=4, BI=3) + + bltlr = BA.bclr(BO=12, BI=0) + blelr = BA.bclr(BO=4, BI=1) + beqlr = BA.bclr(BO=12, BI=2) + bgelr = BA.bclr(BO=4, BI=0) + bgtlr = BA.bclr(BO=12, BI=1) + bnllr = BA.bclr(BO=4, BI=0) + bnelr = BA.bclr(BO=4, BI=2) + bnglr = BA.bclr(BO=4, BI=1) + bsolr = BA.bclr(BO=12, BI=3) + bnslr = BA.bclr(BO=4, BI=3) + bunlr = BA.bclr(BO=12, BI=3) + bnulr = BA.bclr(BO=4, BI=3) + + bltctr = BA.bcctr(BO=12, BI=0) 
+ blectr = BA.bcctr(BO=4, BI=1) + beqctr = BA.bcctr(BO=12, BI=2) + bgectr = BA.bcctr(BO=4, BI=0) + bgtctr = BA.bcctr(BO=12, BI=1) + bnlctr = BA.bcctr(BO=4, BI=0) + bnectr = BA.bcctr(BO=4, BI=2) + bngctr = BA.bcctr(BO=4, BI=1) + bsoctr = BA.bcctr(BO=12, BI=3) + bnsctr = BA.bcctr(BO=4, BI=3) + bunctr = BA.bcctr(BO=12, BI=3) + bnuctr = BA.bcctr(BO=4, BI=3) + + bltlrl = BA.bclrl(BO=12, BI=0) + blelrl = BA.bclrl(BO=4, BI=1) + beqlrl = BA.bclrl(BO=12, BI=2) + bgelrl = BA.bclrl(BO=4, BI=0) + bgtlrl = BA.bclrl(BO=12, BI=1) + bnllrl = BA.bclrl(BO=4, BI=0) + bnelrl = BA.bclrl(BO=4, BI=2) + bnglrl = BA.bclrl(BO=4, BI=1) + bsolrl = BA.bclrl(BO=12, BI=3) + bnslrl = BA.bclrl(BO=4, BI=3) + bunlrl = BA.bclrl(BO=12, BI=3) + bnulrl = BA.bclrl(BO=4, BI=3) + + bltctrl = BA.bcctrl(BO=12, BI=0) + blectrl = BA.bcctrl(BO=4, BI=1) + beqctrl = BA.bcctrl(BO=12, BI=2) + bgectrl = BA.bcctrl(BO=4, BI=0) + bgtctrl = BA.bcctrl(BO=12, BI=1) + bnlctrl = BA.bcctrl(BO=4, BI=0) + bnectrl = BA.bcctrl(BO=4, BI=2) + bngctrl = BA.bcctrl(BO=4, BI=1) + bsoctrl = BA.bcctrl(BO=12, BI=3) + bnsctrl = BA.bcctrl(BO=4, BI=3) + bunctrl = BA.bcctrl(BO=12, BI=3) + bnuctrl = BA.bcctrl(BO=4, BI=3) + + # whew! and we haven't even begun the predicted versions... 
+ + # F.6 Simplified Mnemonics for Condition Register + # Logical Instructions + + crset = BA.creqv(crbA="crbD", crbB="crbD") + crclr = BA.crxor(crbA="crbD", crbB="crbD") + crmove = BA.cror(crbA="crbB") + crnot = BA.crnor(crbA="crbB") + + # F.7 Simplified Mnemonics for Trap Instructions + + trap = BA.tw(TO=31, rA=0, rB=0) + twlt = BA.tw(TO=16) + twle = BA.tw(TO=20) + tweq = BA.tw(TO=4) + twge = BA.tw(TO=12) + twgt = BA.tw(TO=8) + twnl = BA.tw(TO=12) + twng = BA.tw(TO=24) + twllt = BA.tw(TO=2) + twlle = BA.tw(TO=6) + twlge = BA.tw(TO=5) + twlgt = BA.tw(TO=1) + twlnl = BA.tw(TO=5) + twlng = BA.tw(TO=6) + + twlti = BA.twi(TO=16) + twlei = BA.twi(TO=20) + tweqi = BA.twi(TO=4) + twgei = BA.twi(TO=12) + twgti = BA.twi(TO=8) + twnli = BA.twi(TO=12) + twnei = BA.twi(TO=24) + twngi = BA.twi(TO=20) + twllti = BA.twi(TO=2) + twllei = BA.twi(TO=6) + twlgei = BA.twi(TO=5) + twlgti = BA.twi(TO=1) + twlnli = BA.twi(TO=5) + twlngi = BA.twi(TO=6) + + # F.8 Simplified Mnemonics for Special-Purpose + # Registers + + mfctr = BA.mfspr(spr=9) + mflr = BA.mfspr(spr=8) + mftbl = BA.mftb(spr=268) + mftbu = BA.mftb(spr=269) + mfxer = BA.mfspr(spr=1) + + mtctr = BA.mtspr(spr=9) + mtlr = BA.mtspr(spr=8) + mtxer = BA.mtspr(spr=1) + + # F.9 Recommended Simplified Mnemonics + + nop = BA.ori(rS=0, rA=0, UIMM=0) + + li = BA.addi(rA=0) + lis = BA.addis(rA=0) + + mr = BA.or_(rB="rS") + mrx = BA.or_x(rB="rS") + + not_ = BA.nor(rB="rS") + not_x = BA.norx(rB="rS") + + mtcr = BA.mtcrf(CRM=0xFF) + +PPCAssembler = make_rassembler(PPCAssembler) + +def hi(w): + return w >> 16 + +def ha(w): + if (w >> 15) & 1: + return (w >> 16) + 1 + else: + return w >> 16 + +def lo(w): + return w & 0x0000FFFF + +def la(w): + v = w & 0x0000FFFF + if v & 0x8000: + return -((v ^ 0xFFFF) + 1) # "sign extend" to 32 bits + return v + +def highest(w): + return w >> 48 + +def higher(w): + return (w >> 32) & 0x0000FFFF + +def high(w): + return (w >> 16) & 0x0000FFFF + +# XXX check this +if we_are_translated(): + eci = 
ExternalCompilationInfo(includes = ['asm_ppc.h']) + + flush_icache = rffi.llexternal( + "LL_flush_icache", + [lltype.Signed, lltype.Signed], + lltype.Void, + compilation_info=eci, + _nowrapper=True, + sandboxsafe=True) +else: + def flush_icache(x, y): pass + +class GuardToken(object): + def __init__(self, descr, failargs, faillocs, offset, + save_exc=False, is_invalidate=False): + self.descr = descr + self.offset = offset + self.is_invalidate = is_invalidate + self.failargs = failargs + self.faillocs = faillocs + self.save_exc = save_exc + +class OverwritingBuilder(PPCAssembler): + def __init__(self, cb, start, num_insts): + PPCAssembler.__init__(self) + self.cb = cb + self.index = start + self.num_insts = num_insts + + def currpos(self): + assert 0, "not implemented" + + def overwrite(self): + assert len(self.insts) <= self.num_insts + startindex = self.index / 4 + for i, new_inst in enumerate(self.insts): + self.cb.insts[i + startindex] = new_inst + +class PPCBuilder(BlockBuilderMixin, PPCAssembler): + def __init__(self, failargs_limit=1000, r0_in_use=False): + PPCAssembler.__init__(self) + self.init_block_builder() + self.r0_in_use = r0_in_use + self.ops_offset = {} + + def mark_op(self, op): + pos = self.get_relative_pos() + self.ops_offset[op] = pos + + def check(self, desc, v, *args): + desc.__get__(self)(*args) + ins = self.insts.pop() + expected = ins.assemble() + if expected < 0: + expected += 1<<32 + assert v == expected + + def load_imm(self, dest_reg, word): + rD = dest_reg.value + if word <= 32767 and word >= -32768: + self.li(rD, word) + elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): + self.lis(rD, hi(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + else: + self.load_imm(dest_reg, word>>32) + self.sldi(rD, rD, 32) + if word & 0xFFFF0000 != 0: + self.oris(rD, rD, high(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + + def load_from_addr(self, rD, addr): + self.load_imm(rD, addr) + if IS_PPC_32: + 
self.lwzx(rD.value, 0, rD.value) + else: + self.ldx(rD.value, 0, rD.value) + + def store_reg(self, source_reg, addr): + with scratch_reg(self): + self.load_imm(r.SCRATCH, addr) + if IS_PPC_32: + self.stwx(source_reg.value, 0, r.SCRATCH.value) + else: + self.stdx(source_reg.value, 0, r.SCRATCH.value) + + def b_offset(self, target): + curpos = self.currpos() + offset = target - curpos + assert offset < (1 << 24) + self.b(offset) + + def b_cond_offset(self, offset, condition): + BI = condition[0] + BO = condition[1] + + pos = self.currpos() + target_ofs = offset - pos + self.bc(BO, BI, target_ofs) + + def b_cond_abs(self, addr, condition): + BI = condition[0] + BO = condition[1] + + with scratch_reg(self): + self.load_imm(r.SCRATCH, addr) + self.mtctr(r.SCRATCH.value) + self.bcctr(BO, BI) + + def b_abs(self, address, trap=False): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + if trap: + self.trap() + self.bctr() + + def bl_abs(self, address): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + self.bctrl() + + def call(self, address): + """ do a call to an absolute address + """ + with scratch_reg(self): + if IS_PPC_32: + self.load_imm(r.SCRATCH, address) + else: + self.store(r.TOC.value, r.SP.value, 5 * WORD) + self.load_imm(r.r11, address) + self.load(r.SCRATCH.value, r.r11.value, 0) + self.load(r.TOC.value, r.r11.value, WORD) + self.load(r.r11.value, r.r11.value, 2 * WORD) + self.mtctr(r.SCRATCH.value) + self.bctrl() + + if IS_PPC_64: + self.load(r.TOC.value, r.SP.value, 5 * WORD) + + def call_register(self, call_reg): + """ do a call to an address given in a register + """ + assert isinstance(call_reg, RegisterLocation) + with scratch_reg(self): + if IS_PPC_32: + self.mr(r.SCRATCH.value, call_reg.value) + else: + self.store(r.TOC.value, r.SP.value, 5 * WORD) + self.mr(r.r11.value, call_reg.value) + self.load(r.SCRATCH.value, r.r11.value, 0) + self.load(r.TOC.value, 
r.r11.value, WORD) + self.load(r.r11.value, r.r11.value, 2 * WORD) + self.mtctr(r.SCRATCH.value) + self.bctrl() + + if IS_PPC_64: + self.load(r.TOC.value, r.SP.value, 5 * WORD) + + def make_function_prologue(self, frame_size): + """ Build a new stackframe of size frame_size + and store the LR in the previous frame. + """ + with scratch_reg(self): + self.store_update(r.SP.value, r.SP.value, -frame_size) + self.mflr(r.SCRATCH.value) + self.store(r.SCRATCH.value, r.SP.value, frame_size + LR_BC_OFFSET) + + def restore_LR_from_caller_frame(self, frame_size): + """ Restore the LR from the calling frame. + frame_size is the size of the current frame. + """ + with scratch_reg(self): + lr_offset = frame_size + LR_BC_OFFSET + self.load(r.SCRATCH.value, r.SP.value, lr_offset) + self.mtlr(r.SCRATCH.value) + + def load(self, target_reg, base_reg, offset): + if IS_PPC_32: + self.lwz(target_reg, base_reg, offset) + else: + self.ld(target_reg, base_reg, offset) + + def loadx(self, target_reg, base_reg, offset_reg): + if IS_PPC_32: + self.lwzx(target_reg, base_reg, offset_reg) + else: + self.ldx(target_reg, base_reg, offset_reg) + + def store(self, from_reg, base_reg, offset): + if IS_PPC_32: + self.stw(from_reg, base_reg, offset) + else: + self.std(from_reg, base_reg, offset) + + def storex(self, from_reg, base_reg, offset_reg): + if IS_PPC_32: + self.stwx(from_reg, base_reg, offset_reg) + else: + self.stdx(from_reg, base_reg, offset_reg) + + def store_update(self, target_reg, from_reg, offset): + if IS_PPC_32: + self.stwu(target_reg, from_reg, offset) + else: + self.stdu(target_reg, from_reg, offset) + + def srli_op(self, target_reg, from_reg, numbits): + if IS_PPC_32: + self.srwi(target_reg, from_reg, numbits) + else: + self.srdi(target_reg, from_reg, numbits) + + def sl_op(self, target_reg, from_reg, numbit_reg): + if IS_PPC_32: + self.slw(target_reg, from_reg, numbit_reg) + else: + self.sld(target_reg, from_reg, numbit_reg) + + def prepare_insts_blocks(self, show=False): + 
insts = self.insts + for inst in insts: + self.write32(inst) + + def _dump_trace(self, addr, name, formatter=-1): + if not we_are_translated(): + if formatter != -1: + name = name % formatter + dir = udir.ensure('asm', dir=True) + f = dir.join(name).open('wb') + data = rffi.cast(rffi.CCHARP, addr) + for i in range(self.currpos()): + f.write(data[i]) + f.close() + + def write32(self, word): + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + + def write64(self, word): + self.writechar(chr((word >> 56) & 0xFF)) + self.writechar(chr((word >> 48) & 0xFF)) + self.writechar(chr((word >> 40) & 0xFF)) + self.writechar(chr((word >> 32) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + + def currpos(self): + return self.get_rel_pos() + + def flush_cache(self, addr): + startaddr = rffi.cast(lltype.Signed, addr) + size = rffi.cast(lltype.Signed, self.get_relative_pos()) + flush_icache(startaddr, size) + + def copy_to_raw_memory(self, addr): + self._copy_to_raw_memory(addr) + self.flush_cache(addr) + self._dump(addr, "jit-backend-dump", 'ppc') + + def cmp_op(self, block, a, b, imm=False, signed=True, fp=False): + if fp == True: + self.fcmpu(block, a, b) + elif IS_PPC_32: + if signed: + if imm: + # 32 bit immediate signed + self.cmpwi(block, a, b) + else: + # 32 bit signed + self.cmpw(block, a, b) + else: + if imm: + # 32 bit immediate unsigned + self.cmplwi(block, a, b) + else: + # 32 bit unsigned + self.cmplw(block, a, b) + else: + if signed: + if imm: + # 64 bit immediate signed + self.cmpdi(block, a, b) + else: + # 64 bit signed + self.cmpd(block, a, b) + else: + if imm: + # 64 bit immediate unsigned + self.cmpldi(block, a, b) + else: + # 64 bit unsigned + self.cmpld(block, a, b) + + def alloc_scratch_reg(self): + assert not self.r0_in_use + 
self.r0_in_use = True + + def free_scratch_reg(self): + assert self.r0_in_use + self.r0_in_use = False + +class scratch_reg(object): + def __init__(self, mc): + self.mc = mc + + def __enter__(self): + self.mc.alloc_scratch_reg() + + def __exit__(self, *args): + self.mc.free_scratch_reg() + +class BranchUpdater(PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + + def write_to_mem(self, addr): + self.assemble() + self.copy_to_raw_memory(addr) + + def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): + insns = self.assemble0(dump) + for i in insns: + self.emit(i) + +def b(n): + r = [] + for i in range(32): + r.append(n&1) + n >>= 1 + r.reverse() + return ''.join(map(str, r)) + +def make_operations(): + def not_implemented(builder, trace_op, cpu, *rest_args): + import pdb; pdb.set_trace() + + oplist = [None] * (rop._LAST + 1) + for key, val in rop.__dict__.items(): + if key.startswith("_"): + continue + opname = key.lower() + methname = "emit_%s" % opname + if hasattr(PPCBuilder, methname): + oplist[val] = getattr(PPCBuilder, methname).im_func + else: + oplist[val] = not_implemented + return oplist + +PPCBuilder.operations = make_operations() diff --git a/pypy/jit/backend/ppc/condition.py b/pypy/jit/backend/ppc/condition.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/condition.py @@ -0,0 +1,21 @@ +# CONDITION = (BI (number of bit tested in CR), BO (12 if bit is 1, else 4)) + +SET = 12 +UNSET = 4 + +LE = (1, UNSET) +NE = (2, UNSET) +GT = (1, SET) +LT = (0, SET) +EQ = (2, SET) +GE = (0, UNSET) + +# values below are random ... + +U_LT = 50 +U_LE = 60 +U_GT = 70 +U_GE = 80 + +IS_TRUE = 90 +IS_ZERO = 100 diff --git a/pypy/jit/backend/ppc/field.py b/pypy/jit/backend/ppc/field.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/field.py @@ -0,0 +1,62 @@ +# only a small file, but there's some hairy stuff in here! 
+""" +>>> f = Field('test', 16, 31) +>>> f + +>>> f.encode(65535) +65535 +>>> f.encode(65536) +Traceback (most recent call last): + File \"\", line 1, in ? + File \"field.py\", line 25, in encode + raise ValueError(\"field '%s' can't accept value %s\" +ValueError: field 'test' can't accept value 65536 +>>> + +""" + + +class Field(object): + def __init__(self, name, left, right, signedness=False, valclass=int, overlap=False): + self.name = name + self.left = left + self.right = right + width = self.right - self.left + 1 + # mask applies before shift! + self.mask = 2**width - 1 + self.signed = signedness == 'signed' + self.valclass = valclass + self.overlap = overlap == 'overlap' + def __repr__(self): + return ''%(self.name,) + def encode(self, value): + if not issubclass(self.valclass, type(value)): + raise ValueError("field '%s' takes '%s's, not '%s's" + %(self.name, self.valclass.__name__, type(value).__name__)) + if not self.signed and value < 0: + raise ValueError("field '%s' is unsigned and can't accept value %d" + %(self.name, value)) + # that this does the right thing is /not/ obvious (but true!) + if ((value >> 31) ^ value) & ~(self.mask >> self.signed): + raise ValueError("field '%s' can't accept value %s" + %(self.name, value)) + value &= self.mask + value = long(value) + value <<= (32 - self.right - 1) + if value & 0x80000000L: + # yuck: + return ~int((~value)&0xFFFFFFFFL) + else: + return int(value) + def decode(self, inst): + mask = self.mask + v = (inst >> 32 - self.right - 1) & mask + if self.signed and (~mask >> 1) & mask & v: + v = ~(~v&mask) + return self.valclass(v) + def r(self, v, labels, pc): + return self.decode(v) + +if __name__=='__main__': + import doctest + doctest.testmod() diff --git a/pypy/jit/backend/ppc/form.py b/pypy/jit/backend/ppc/form.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/form.py @@ -0,0 +1,194 @@ + +# XXX there is much grot here. 
+ +# some of this comes from trying to present a reasonably intuitive and +# useful interface, which implies a certain amount of DWIMmery. +# things surely still could be more transparent. + +class FormException(Exception): + pass + + +class Instruction(object): + def __init__(self, fields): + self.fields = fields + self.lfields = [k for (k,v) in fields.iteritems() + if isinstance(v, str)] + #if not self.lfields: + # self.assemble() # for error checking only + def assemble(self): + r = 0 + for field in self.fields: + r |= field.encode(self.fields[field]) + return r + + +class IBoundDesc(object): + def __init__(self, desc, fieldmap, assembler): + self.fieldmap = fieldmap + self.desc = desc + self.assembler = assembler + def calc_fields(self, args, kw): + fieldsleft = list(self.desc.fields) + fieldvalues = {} + for fname in kw: + kw[fname] = self.fieldmap[fname] + for d in (self.desc.specializations, kw): + for field in d: + fieldsleft.remove(field) + fieldvalues[field] = d[field] + for i in range(min(len(self.desc.defaults), len(fieldsleft) - len(args))): + f, v = self.desc.defaults[i] + fieldvalues[f] = v + fieldsleft.remove(f) + for a in args: + field = fieldsleft.pop(0) + fieldvalues[field] = a + return fieldvalues, fieldsleft + def __call__(self, *args, **kw): + fieldvalues, sparefields = self.calc_fields(args, kw) + if sparefields: + raise FormException, 'fields %s left'%sparefields + self.assembler.insts.append(Instruction(fieldvalues)) + + +class IBoundDupDesc(IBoundDesc): + def calc_fields(self, args, kw): + s = super(IBoundDupDesc, self) + fieldvalues, sparefields = s.calc_fields(args, kw) + for k, v in self.desc.dupfields.iteritems(): + fieldvalues[k] = fieldvalues[v] + return fieldvalues, sparefields + + +class IDesc(object): + boundtype = IBoundDesc + def __init__(self, fieldmap, fields, specializations, boundtype=None): + self.fieldmap = fieldmap + self.fields = fields + self.specializations = specializations + self.defaults = () + if boundtype is not 
None: + self.boundtype = boundtype + for field in specializations: + if field not in fields: + raise FormException, field + + def __get__(self, ob, cls=None): + if ob is None: return self + return self.boundtype(self, self.fieldmap, ob) + + def default(self, **defs): + assert len(defs) == 1 + f, v = defs.items()[0] + self.defaults = self.defaults + ((self.fieldmap[f], v),) + return self + + def __call__(self, **more_specializatons): + s = self.specializations.copy() + ms = {} + ds = {} + for fname, v in more_specializatons.iteritems(): + field = self.fieldmap[fname] + if field not in self.fields: + raise FormException, "don't know about '%s' here" % field + if isinstance(v, str): + ds[field] = self.fieldmap[v] + else: + ms[field] = v + s.update(ms) + if len(s) != len(self.specializations) + len(ms): + raise FormException, "respecialization not currently allowed" + if ds: + fields = list(self.fields) + for field in ds: + fields.remove(field) + return IDupDesc(self.fieldmap, tuple(fields), s, ds) + else: + r = IDesc(self.fieldmap, self.fields, s, self.boundtype) + r.defaults = tuple([(f, d) for (f, d) in self.defaults if f not in s]) + return r + + def match(self, inst): + c = 0 + for field in self.fields: + if field in self.specializations: + if field.decode(inst) != self.specializations[field]: + return 0 + else: + c += 1 + return c + + def __repr__(self): + l = [] + for field in self.fields: + if field in self.specializations: + l.append('%s=%r'%(field.name, self.specializations[field])) + else: + l.append(field.name) + r = '%s(%s)'%(self.__class__.__name__, ', '.join(l)) + if self.boundtype is not self.__class__.boundtype: + r += ' => ' + self.boundtype.__name__ + return r + + def disassemble(self, name, inst, labels, pc): + kws = [] + for field in self.fields: + if field not in self.specializations: + v = field.decode(inst) + for f, d in self.defaults: + if f is field: + if d == v: + break + else: + kws.append('%s=%s'%(field.name, field.r(inst, labels, pc))) + 
return "%-5s %s"%(name, ', '.join(kws)) + + +class IDupDesc(IDesc): + boundtype = IBoundDupDesc + def __init__(self, fieldmap, fields, specializations, dupfields): + super(IDupDesc, self).__init__(fieldmap, fields, specializations) + self.dupfields = dupfields + + def match(self, inst): + for field in self.dupfields: + df = self.dupfields[field] + if field.decode(inst) != df.decode(inst): + return 0 + else: + return super(IDupDesc, self).match(inst) + + +class Form(object): + fieldmap = None + def __init__(self, *fnames): + self.fields = [] + bits = {} + overlap = False + for fname in fnames: + if isinstance(fname, str): + field = self.fieldmap[fname] + else: + field = fname + if field.overlap: + overlap = True + for b in range(field.left, field.right+1): + if not overlap and b in bits: + raise FormException, "'%s' and '%s' clash at bit '%s'"%( + bits[b], fname, b) + else: + bits[b] = fname + self.fields.append(field) + + def __call__(self, **specializations): + s = {} + for fname in specializations: + field = self.fieldmap[fname] + if field not in self.fields: + raise FormException, "no nothin bout '%s'"%fname + s[field] = specializations[fname] + return IDesc(self.fieldmap, self.fields, s) + + def __repr__(self): + return '%s(%r)'%(self.__class__.__name__, [f.name for f in self.fields]) diff --git a/pypy/jit/backend/ppc/helper/__init__.py b/pypy/jit/backend/ppc/helper/__init__.py new file mode 100644 diff --git a/pypy/jit/backend/ppc/helper/assembler.py b/pypy/jit/backend/ppc/helper/assembler.py new file mode 100644 --- /dev/null +++ b/pypy/jit/backend/ppc/helper/assembler.py @@ -0,0 +1,109 @@ +import pypy.jit.backend.ppc.condition as c +from pypy.rlib.rarithmetic import intmask +from pypy.jit.backend.ppc.arch import (MAX_REG_PARAMS, IS_PPC_32, WORD, + BACKCHAIN_SIZE) +from pypy.jit.metainterp.history import FLOAT +import pypy.jit.backend.ppc.register as r +from pypy.rpython.lltypesystem import rffi, lltype + +def gen_emit_cmp_op(condition, signed=True, 
fp=False): + def f(self, op, arglocs, regalloc): + l0, l1, res = arglocs + # do the comparison + self.mc.cmp_op(0, l0.value, l1.value, + imm=l1.is_imm(), signed=signed, fp=fp) + # After the comparison, place the result + # in the first bit of the CR + if condition == c.LT or condition == c.U_LT: + self.mc.cror(0, 0, 0) + elif condition == c.LE or condition == c.U_LE: + self.mc.cror(0, 0, 2) + elif condition == c.EQ: + self.mc.cror(0, 2, 2) + elif condition == c.GE or condition == c.U_GE: + self.mc.cror(0, 1, 2) + elif condition == c.GT or condition == c.U_GT: + self.mc.cror(0, 1, 1) + elif condition == c.NE: + self.mc.crnor(0, 2, 2) + else: + assert 0, "condition not known" + + resval = res.value + # move the content of the CR to resval + self.mc.mfcr(resval) + # zero out everything except of the result + self.mc.rlwinm(resval, resval, 1, 31, 31) + return f + +def gen_emit_unary_cmp_op(condition): + def f(self, op, arglocs, regalloc): + reg, res = arglocs + + self.mc.cmp_op(0, reg.value, 0, imm=True) + if condition == c.IS_ZERO: + self.mc.cror(0, 2, 2) + elif condition == c.IS_TRUE: + self.mc.cror(0, 0, 1) + else: + assert 0, "condition not known" + + self.mc.mfcr(res.value) + self.mc.rlwinm(res.value, res.value, 1, 31, 31) + return f + +def count_reg_args(args): + reg_args = 0 + words = 0 + count = 0 + for x in range(min(len(args), MAX_REG_PARAMS)): + if args[x].type == FLOAT: + count += 1 + words += 1 + else: + count += 1 + words += 1 + reg_args += 1 + if words > MAX_REG_PARAMS: + reg_args = x + break + return reg_args + +class Saved_Volatiles(object): + """ used in _gen_leave_jitted_hook_code to save volatile registers + in ENCODING AREA around calls + """ + + def __init__(self, codebuilder, save_RES=True, save_FLOAT=True): + self.mc = codebuilder + self.save_RES = save_RES From noreply at buildbot.pypy.org Wed May 7 15:32:21 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 7 May 2014 15:32:21 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: 
Move the imported backend to the right location Message-ID: <20140507133221.32FBC1D27BD@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71374:47420dfc21e2 Date: 2014-02-28 20:45 +0000 http://bitbucket.org/pypy/pypy/changeset/47420dfc21e2/ Log: Move the imported backend to the right location diff --git a/pypy/jit/backend/ppc/_ppcgen.c b/rpython/jit/backend/ppc/_ppcgen.c rename from pypy/jit/backend/ppc/_ppcgen.c rename to rpython/jit/backend/ppc/_ppcgen.c diff --git a/pypy/jit/backend/ppc/arch.py b/rpython/jit/backend/ppc/arch.py rename from pypy/jit/backend/ppc/arch.py rename to rpython/jit/backend/ppc/arch.py diff --git a/pypy/jit/backend/ppc/asmfunc.py b/rpython/jit/backend/ppc/asmfunc.py rename from pypy/jit/backend/ppc/asmfunc.py rename to rpython/jit/backend/ppc/asmfunc.py diff --git a/pypy/jit/backend/ppc/assembler.py b/rpython/jit/backend/ppc/assembler.py rename from pypy/jit/backend/ppc/assembler.py rename to rpython/jit/backend/ppc/assembler.py diff --git a/pypy/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py rename from pypy/jit/backend/ppc/codebuilder.py rename to rpython/jit/backend/ppc/codebuilder.py diff --git a/pypy/jit/backend/ppc/condition.py b/rpython/jit/backend/ppc/condition.py rename from pypy/jit/backend/ppc/condition.py rename to rpython/jit/backend/ppc/condition.py diff --git a/pypy/jit/backend/ppc/field.py b/rpython/jit/backend/ppc/field.py rename from pypy/jit/backend/ppc/field.py rename to rpython/jit/backend/ppc/field.py diff --git a/pypy/jit/backend/ppc/form.py b/rpython/jit/backend/ppc/form.py rename from pypy/jit/backend/ppc/form.py rename to rpython/jit/backend/ppc/form.py diff --git a/pypy/jit/backend/ppc/helper/__init__.py b/rpython/jit/backend/ppc/helper/__init__.py rename from pypy/jit/backend/ppc/helper/__init__.py rename to rpython/jit/backend/ppc/helper/__init__.py diff --git a/pypy/jit/backend/ppc/helper/assembler.py 
b/rpython/jit/backend/ppc/helper/assembler.py rename from pypy/jit/backend/ppc/helper/assembler.py rename to rpython/jit/backend/ppc/helper/assembler.py diff --git a/pypy/jit/backend/ppc/helper/regalloc.py b/rpython/jit/backend/ppc/helper/regalloc.py rename from pypy/jit/backend/ppc/helper/regalloc.py rename to rpython/jit/backend/ppc/helper/regalloc.py diff --git a/pypy/jit/backend/ppc/jump.py b/rpython/jit/backend/ppc/jump.py rename from pypy/jit/backend/ppc/jump.py rename to rpython/jit/backend/ppc/jump.py diff --git a/pypy/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py rename from pypy/jit/backend/ppc/locations.py rename to rpython/jit/backend/ppc/locations.py diff --git a/pypy/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py rename from pypy/jit/backend/ppc/opassembler.py rename to rpython/jit/backend/ppc/opassembler.py diff --git a/pypy/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py rename from pypy/jit/backend/ppc/ppc_assembler.py rename to rpython/jit/backend/ppc/ppc_assembler.py diff --git a/pypy/jit/backend/ppc/ppc_field.py b/rpython/jit/backend/ppc/ppc_field.py rename from pypy/jit/backend/ppc/ppc_field.py rename to rpython/jit/backend/ppc/ppc_field.py diff --git a/pypy/jit/backend/ppc/ppc_form.py b/rpython/jit/backend/ppc/ppc_form.py rename from pypy/jit/backend/ppc/ppc_form.py rename to rpython/jit/backend/ppc/ppc_form.py diff --git a/pypy/jit/backend/ppc/rassemblermaker.py b/rpython/jit/backend/ppc/rassemblermaker.py rename from pypy/jit/backend/ppc/rassemblermaker.py rename to rpython/jit/backend/ppc/rassemblermaker.py diff --git a/pypy/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py rename from pypy/jit/backend/ppc/regalloc.py rename to rpython/jit/backend/ppc/regalloc.py diff --git a/pypy/jit/backend/ppc/register.py b/rpython/jit/backend/ppc/register.py rename from pypy/jit/backend/ppc/register.py rename to rpython/jit/backend/ppc/register.py diff --git 
a/pypy/jit/backend/ppc/regname.py b/rpython/jit/backend/ppc/regname.py rename from pypy/jit/backend/ppc/regname.py rename to rpython/jit/backend/ppc/regname.py diff --git a/pypy/jit/backend/ppc/symbol_lookup.py b/rpython/jit/backend/ppc/symbol_lookup.py rename from pypy/jit/backend/ppc/symbol_lookup.py rename to rpython/jit/backend/ppc/symbol_lookup.py diff --git a/pypy/jit/backend/ppc/test/autopath.py b/rpython/jit/backend/ppc/test/autopath.py rename from pypy/jit/backend/ppc/test/autopath.py rename to rpython/jit/backend/ppc/test/autopath.py diff --git a/pypy/jit/backend/ppc/test/test_basic.py b/rpython/jit/backend/ppc/test/test_basic.py rename from pypy/jit/backend/ppc/test/test_basic.py rename to rpython/jit/backend/ppc/test/test_basic.py diff --git a/pypy/jit/backend/ppc/test/test_call_assembler.py b/rpython/jit/backend/ppc/test/test_call_assembler.py rename from pypy/jit/backend/ppc/test/test_call_assembler.py rename to rpython/jit/backend/ppc/test/test_call_assembler.py diff --git a/pypy/jit/backend/ppc/test/test_del.py b/rpython/jit/backend/ppc/test/test_del.py rename from pypy/jit/backend/ppc/test/test_del.py rename to rpython/jit/backend/ppc/test/test_del.py diff --git a/pypy/jit/backend/ppc/test/test_dict.py b/rpython/jit/backend/ppc/test/test_dict.py rename from pypy/jit/backend/ppc/test/test_dict.py rename to rpython/jit/backend/ppc/test/test_dict.py diff --git a/pypy/jit/backend/ppc/test/test_field.py b/rpython/jit/backend/ppc/test/test_field.py rename from pypy/jit/backend/ppc/test/test_field.py rename to rpython/jit/backend/ppc/test/test_field.py diff --git a/pypy/jit/backend/ppc/test/test_float.py b/rpython/jit/backend/ppc/test/test_float.py rename from pypy/jit/backend/ppc/test/test_float.py rename to rpython/jit/backend/ppc/test/test_float.py diff --git a/pypy/jit/backend/ppc/test/test_form.py b/rpython/jit/backend/ppc/test/test_form.py rename from pypy/jit/backend/ppc/test/test_form.py rename to rpython/jit/backend/ppc/test/test_form.py diff 
--git a/pypy/jit/backend/ppc/test/test_loop_unroll.py b/rpython/jit/backend/ppc/test/test_loop_unroll.py rename from pypy/jit/backend/ppc/test/test_loop_unroll.py rename to rpython/jit/backend/ppc/test/test_loop_unroll.py diff --git a/pypy/jit/backend/ppc/test/test_ppc.py b/rpython/jit/backend/ppc/test/test_ppc.py rename from pypy/jit/backend/ppc/test/test_ppc.py rename to rpython/jit/backend/ppc/test/test_ppc.py diff --git a/pypy/jit/backend/ppc/test/test_rassemblermaker.py b/rpython/jit/backend/ppc/test/test_rassemblermaker.py rename from pypy/jit/backend/ppc/test/test_rassemblermaker.py rename to rpython/jit/backend/ppc/test/test_rassemblermaker.py diff --git a/pypy/jit/backend/ppc/test/test_regalloc.py b/rpython/jit/backend/ppc/test/test_regalloc.py rename from pypy/jit/backend/ppc/test/test_regalloc.py rename to rpython/jit/backend/ppc/test/test_regalloc.py diff --git a/pypy/jit/backend/ppc/test/test_register_manager.py b/rpython/jit/backend/ppc/test/test_register_manager.py rename from pypy/jit/backend/ppc/test/test_register_manager.py rename to rpython/jit/backend/ppc/test/test_register_manager.py diff --git a/pypy/jit/backend/ppc/test/test_stackframe.py b/rpython/jit/backend/ppc/test/test_stackframe.py rename from pypy/jit/backend/ppc/test/test_stackframe.py rename to rpython/jit/backend/ppc/test/test_stackframe.py diff --git a/pypy/jit/backend/ppc/test/test_virtualref.py b/rpython/jit/backend/ppc/test/test_virtualref.py rename from pypy/jit/backend/ppc/test/test_virtualref.py rename to rpython/jit/backend/ppc/test/test_virtualref.py From noreply at buildbot.pypy.org Wed May 7 15:53:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 15:53:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Revert partially d95d0c9bb988, because it was bogus. 
My fault for Message-ID: <20140507135327.89F3E1C155F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71388:44cefa6f9a3e Date: 2014-05-07 15:36 +0200 http://bitbucket.org/pypy/pypy/changeset/44cefa6f9a3e/ Log: Revert partially d95d0c9bb988, because it was bogus. My fault for doing a quick fix without writing tests: now I don't have any more a translation where this fix helped. diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -296,10 +296,11 @@ # trim: instructions with no framesize are removed from self.insns, # and from the 'previous_insns' lists - assert hasattr(self.insns[0], 'framesize') - old = self.insns[1:] - del self.insns[1:] - for insn in old: + if 0: # <- XXX disabled because it seems bogus, investigate more + assert hasattr(self.insns[0], 'framesize') + old = self.insns[1:] + del self.insns[1:] + for insn in old: if hasattr(insn, 'framesize'): self.insns.append(insn) insn.previous_insns = [previnsn for previnsn in insn.previous_insns From noreply at buildbot.pypy.org Wed May 7 18:31:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 18:31:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset: ad12f8418f24 Message-ID: <20140507163143.DD0D71C10BD@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71389:8e7585c27369 Date: 2014-05-07 19:25 +0300 http://bitbucket.org/pypy/pypy/changeset/8e7585c27369/ Log: Backed out changeset: ad12f8418f24 diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -753,8 +753,6 @@ def add_extra_files(eci): srcdir = py.path.local(__file__).join('..', 'src') - _MSVC = eci.platform.name == 'msvc' - files = [ srcdir / 'entrypoint.c', # ifdef PYPY_STANDALONE srcdir / 'allocator.c', # ifdef PYPY_STANDALONE @@ -771,8 +769,6 @@ ] if 
_CYGWIN: files.append(srcdir / 'cygwin_wait.c') - if _MSVC: - files.append(srcdir / 'asm_msvc.c') return eci.merge(ExternalCompilationInfo(separate_module_files=files)) diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,6 +1,5 @@ #ifdef PYPY_X86_CHECK_SSE2 #include -#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 18:31:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 18:31:45 +0200 (CEST) Subject: [pypy-commit] pypy default: a better fix for translation on msvc after 3484aaa1e858 Message-ID: <20140507163145.04A331C10BD@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71390:a29ce0b1a8b4 Date: 2014-05-07 19:30 +0300 http://bitbucket.org/pypy/pypy/changeset/a29ce0b1a8b4/ Log: a better fix for translation on msvc after 3484aaa1e858 diff --git a/rpython/translator/c/src/asm.c b/rpython/translator/c/src/asm.c --- a/rpython/translator/c/src/asm.c +++ b/rpython/translator/c/src/asm.c @@ -12,6 +12,6 @@ # include "src/asm_ppc.c" #endif -#if defined(MS_WINDOWS) && defined(_MSC_VER) +#if defined(_MSC_VER) # include "src/asm_msvc.c" #endif diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 18:31:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 18:31:46 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Revert partially d95d0c9bb988, because it was bogus. 
My fault for Message-ID: <20140507163146.32D521C10BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71391:8eb9af65507b Date: 2014-05-07 15:36 +0200 http://bitbucket.org/pypy/pypy/changeset/8eb9af65507b/ Log: Revert partially d95d0c9bb988, because it was bogus. My fault for doing a quick fix without writing tests: now I don't have any more a translation where this fix helped. diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -296,10 +296,11 @@ # trim: instructions with no framesize are removed from self.insns, # and from the 'previous_insns' lists - assert hasattr(self.insns[0], 'framesize') - old = self.insns[1:] - del self.insns[1:] - for insn in old: + if 0: # <- XXX disabled because it seems bogus, investigate more + assert hasattr(self.insns[0], 'framesize') + old = self.insns[1:] + del self.insns[1:] + for insn in old: if hasattr(insn, 'framesize'): self.insns.append(insn) insn.previous_insns = [previnsn for previnsn in insn.previous_insns From noreply at buildbot.pypy.org Wed May 7 18:31:47 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 18:31:47 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Backed out changeset: ad12f8418f24 Message-ID: <20140507163147.4F0CE1C10BD@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71392:154f94fa54b1 Date: 2014-05-07 19:25 +0300 http://bitbucket.org/pypy/pypy/changeset/154f94fa54b1/ Log: Backed out changeset: ad12f8418f24 diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -751,8 +751,6 @@ def add_extra_files(eci): srcdir = py.path.local(__file__).join('..', 'src') - _MSVC = eci.platform.name == 'msvc' - files = [ srcdir / 'entrypoint.c', # ifdef PYPY_STANDALONE srcdir / 'allocator.c', # ifdef 
PYPY_STANDALONE @@ -769,8 +767,6 @@ ] if _CYGWIN: files.append(srcdir / 'cygwin_wait.c') - if _MSVC: - files.append(srcdir / 'asm_msvc.c') return eci.merge(ExternalCompilationInfo(separate_module_files=files)) diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,6 +1,5 @@ #ifdef PYPY_X86_CHECK_SSE2 #include -#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 18:31:48 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 7 May 2014 18:31:48 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: a better fix for translation on msvc after 3484aaa1e858 Message-ID: <20140507163148.69DC81C10BD@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71393:90042a6e8292 Date: 2014-05-07 19:30 +0300 http://bitbucket.org/pypy/pypy/changeset/90042a6e8292/ Log: a better fix for translation on msvc after 3484aaa1e858 diff --git a/rpython/translator/c/src/asm.c b/rpython/translator/c/src/asm.c --- a/rpython/translator/c/src/asm.c +++ b/rpython/translator/c/src/asm.c @@ -12,6 +12,6 @@ # include "src/asm_ppc.c" #endif -#if defined(MS_WINDOWS) && defined(_MSC_VER) +#if defined(_MSC_VER) # include "src/asm_msvc.c" #endif diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Wed May 7 19:53:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 May 2014 19:53:38 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add this as a todo item Message-ID: <20140507175338.833341C328C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71394:ecd03bce28a0 Date: 2014-05-07 19:53 +0200 
http://bitbucket.org/pypy/pypy/changeset/ecd03bce28a0/ Log: Add this as a todo item diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -97,6 +97,13 @@ ------------------------------------------------------------ +allocating dummy 16 bytes if a loop doesn't allocate anything else: +could be replaced by lowering the nursery's limit, to avoid creating +holes (or even mostly-empty, already-zero nurseries that must still be +entirely memset) + +------------------------------------------------------------ + From noreply at buildbot.pypy.org Wed May 7 21:16:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed obsolete source file. Message-ID: <20140507191610.568021C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r798:8496143eb9ae Date: 2014-05-05 14:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8496143eb9ae/ Log: Removed obsolete source file. diff --git a/spyvm/strategies.py b/spyvm/strategies.py deleted file mode 100644 --- a/spyvm/strategies.py +++ /dev/null @@ -1,259 +0,0 @@ - -import sys, math -from spyvm import model, shadow, constants -from rpython.rlib import longlong2float, rarithmetic -from rpython.rlib.rstruct.runpack import runpack -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.objectmodel import import_from_mixin -from rpython.rlib.rfloat import string_to_float - -class AbstractStorageStrategy(shadow.AbstractShadow): - _immutable_fields_ = [] - _attrs_ = [] - _settled_ = True - strategy_tag = 'abstract' - needs_objspace = False - - def set_initial_storage(self, space, w_obj, size): - raise NotImplementedError("Abstract base class") - def set_storage_for_list(self, space, w_obj, collection): - raise NotImplementedError("Abstract base class") - def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - raise NotImplementedError("Abstract base class") - - def 
store(self, space, w_obj, n0, w_val): - if self.can_contain(space, w_val): - return self.do_store(space, w_obj, n0, w_val) - new_strategy = self.generelized_strategy_for(space, w_val) - return w_obj.store_with_new_strategy(space, new_strategy, n0, w_val) - - def generelized_strategy_for(self, space, w_val): - raise NotImplementedError("Abstract base class") - def can_contain(self, space, w_val): - raise NotImplementedError("Abstract base class") - def fetch(self, space, w_obj, n0): - raise NotImplementedError("Abstract base class") - def do_store(self, space, w_obj, n0, w_val): - raise NotImplementedError("Abstract base class") - -class AbstractListStorageStrategy(AbstractStorageStrategy): - strategy_tag = 'abstract-list' - - def storage(self, w_obj): - return w_obj.list_storage - def set_initial_storage(self, space, w_obj, size): - w_obj.list_storage = self.initial_storage(space, size) - def set_storage_for_list(self, space, w_obj, collection): - w_obj.list_storage = self.storage_for_list(space, collection) - def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.list_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) - - def initial_storage(self, space, size): - raise NotImplementedError("Abstract base class") - def storage_for_list(self, space, collection): - raise NotImplementedError("Abstract base class") - def copy_storage_from(self, space, w_obj, reuse_storage): - old_strategy = w_obj.strategy - if old_strategy == self and reuse_storage: - return self.storage(w_obj) - else: - # This can be overridden and optimized (reuse_storage flag, less temporary storage) - return self.storage_for_list(space, w_obj.fetch_all(space)) - -class AbstractIntStorageStrategy(AbstractStorageStrategy): - strategy_tag = 'abstract-int' - - def storage(self, w_obj): - return w_obj.int_storage - def set_initial_storage(self, space, w_obj, size): - w_obj.int_storage = self.initial_storage(space, size) - def 
set_storage_for_list(self, space, w_obj, collection): - w_obj.int_storage = self.storage_for_list(space, collection) - def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - w_obj.int_storage = self.copy_storage_from(space, w_source_obj, reuse_storage) - - def generelized_strategy_for(self, space, w_val): - return ListStorageStrategy.singleton - def initial_storage(self, space, size): - raise NotImplementedError("Abstract base class") - def storage_for_list(self, space, collection): - raise NotImplementedError("Abstract base class") - def copy_storage_from(self, space, w_obj, reuse_storage): - old_strategy = w_obj.strategy - if old_strategy == self and reuse_storage: - return self.storage(w_obj) - else: - # This can be overridden and optimized (reuse_storage flag, less temporary storage) - return self.storage_for_list(space, w_obj.fetch_all(space)) - -class SingletonMeta(type): - def __new__(cls, name, bases, dct): - result = type.__new__(cls, name, bases, dct) - result.singleton = result() - return result - -# this is the typical "initial" storage strategy, for when every slot -# in an object is still nil. No storage is allocated. -class AllNilStorageStrategy(AbstractStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'allnil' - - def can_contain(self, space, w_obj): - return w_obj == space.w_nil - def fetch(self, space, w_obj, n0): - return space.w_nil - def do_store(self, space, w_obj, n0, w_val): - pass - - def generelized_strategy_for(self, space, w_val): - return find_strategy_for_objects(space, [w_val]) - def set_initial_storage(self, space, w_obj, size): - pass - def set_storage_for_list(self, space, w_obj, collection): - pass - def set_storage_copied_from(self, space, w_obj, w_source_obj, reuse_storage=False): - pass - -# This is the regular storage strategy that does not result in any -# optimizations but can handle every case. Applicable for both -# fixed-sized and var-sized objects. 
-class ListStorageStrategy(AbstractListStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'list' - - def can_contain(self, space, w_val): - return True - def fetch(self, space, w_obj, n0): - return self.storage(w_obj)[n0] - def do_store(self, space, w_obj, n0, w_val): - # TODO enable generalization by maintaining a counter of elements that are nil. - self.storage(w_obj)[n0] = w_val - def initial_storage(self, space, size): - return [space.w_nil] * size - def storage_for_list(self, space, collection): - return [x for x in collection] - def copy_storage_from(self, space, w_obj, reuse_storage=False): - length = w_obj.basic_size() - return [w_obj.strategy.fetch(space, w_obj, i) for i in range(length)] - -class AbstractValueOrNilStorageStrategy(AbstractIntStorageStrategy): - needs_objspace = True - strategy_tag = 'abstract-valueOrNil' - # TODO -- use another value... something like max_float? - nil_value = runpack("d", "\x10\x00\x00\x00\x00\x00\xf8\x7f") - nil_value_longlong = longlong2float.float2longlong(nil_value) - - def is_nil_value(self, val): - return longlong2float.float2longlong(val) == self.nil_value_longlong - - def can_contain(self, space, w_val): - return w_val == space.w_nil or \ - (isinstance(w_val, self.wrapper_class) \ - and not self.is_nil_value(self.unwrap(space, w_val))) - - def fetch(self, space, w_obj, n0): - val = self.storage(w_obj)[n0] - if self.is_nil_value(val): - return space.w_nil - else: - return self.wrap(space, val) - - def do_store(self, space, w_obj, n0, w_val): - store = self.storage(w_obj) - if w_val == space.w_nil: - store[n0] = self.nil_value - else: - store[n0] = self.unwrap(space, w_val) - - def initial_storage(self, space, size): - return [self.nil_value] * size - - def storage_for_list(self, space, collection): - length = len(collection) - store = self.initial_storage(space, length) - for i in range(length): - if collection[i] != space.w_nil: - store[i] = self.unwrap(space, collection[i]) - return store - -def 
_int_to_float(int_val): - return longlong2float.longlong2float(rffi.cast(lltype.SignedLongLong, int_val)) - -class SmallIntegerOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'smallint-orNil' - wrapper_class = model.W_SmallInteger - - def wrap(self, space, val): - int_val = rarithmetic.intmask(longlong2float.float2longlong(val)) - return space.wrap_int(int_val) - def unwrap(self, space, w_val): - assert isinstance(w_val, model.W_SmallInteger) - int_val = space.unwrap_int(w_val) - return _int_to_float(int_val) - -class FloatOrNilStorageStrategy(AbstractValueOrNilStorageStrategy): - __metaclass__ = SingletonMeta - strategy_tag = 'float-orNil' - wrapper_class = model.W_Float - - def wrap(self, space, val): - return space.wrap_float(val) - def unwrap(self, space, w_val): - assert isinstance(w_val, model.W_Float) - return space.unwrap_float(w_val) - -def find_strategy_for_objects(space, vars): - specialized_strategies = 3 - all_nil_can_handle = True - small_int_can_handle = True - float_can_handle = True - for w_obj in vars: - if all_nil_can_handle and not AllNilStorageStrategy.singleton.can_contain(space, w_obj): - all_nil_can_handle = False - specialized_strategies = specialized_strategies - 1 - if small_int_can_handle and not SmallIntegerOrNilStorageStrategy.singleton.can_contain(space, w_obj): - small_int_can_handle = False - specialized_strategies = specialized_strategies - 1 - if float_can_handle and not FloatOrNilStorageStrategy.singleton.can_contain(space, w_obj): - float_can_handle = False - specialized_strategies = specialized_strategies - 1 - - if specialized_strategies <= 0: - return ListStorageStrategy.singleton - - if all_nil_can_handle: - return AllNilStorageStrategy.singleton - if small_int_can_handle: - return SmallIntegerOrNilStorageStrategy.singleton - if float_can_handle: - return FloatOrNilStorageStrategy.singleton - - # If this happens, please look for a bug in the code above. 
- assert False, "No strategy could be found for list..." - -def empty_strategy(s_containing_class): - if s_containing_class is None: - # This is a weird and rare special case for w_nil - return ListStorageStrategy.singleton - if not s_containing_class.isvariable(): - return ListStorageStrategy.singleton - - # A newly allocated object contains only nils. - return AllNilStorageStrategy.singleton - -def strategy_for_list(s_containing_class, vars): - if s_containing_class is None: - # This is a weird and rare special case for w_nil - return ListStorageStrategy.singleton - try: - is_variable = s_containing_class.isvariable() - except AttributeError: - # TODO - This happens during bootstrapping phase, when filling in generic objects. - # Ths class object shadows are not yet synchronized. - return ListStorageStrategy.singleton - - if is_variable: - return find_strategy_for_objects(s_containing_class.space, vars) - else: - return ListStorageStrategy.singleton From noreply at buildbot.pypy.org Wed May 7 21:16:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Small fix. Message-ID: <20140507191611.7DCCC1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r799:986a12192755 Date: 2014-05-05 17:20 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/986a12192755/ Log: Small fix. 
diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -31,7 +31,7 @@ size = w_obj.size() key = self.make_key(operation, old_storage, new_storage) - if _stats.do_stats: + if self.do_stats: self.stat_operation(key, size) if self.do_log: if log_classname: From noreply at buildbot.pypy.org Wed May 7 21:16:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Refactored storage statistics to save memory when details are not required. Message-ID: <20140507191612.8EFB61C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r800:4e5d3401cdf2 Date: 2014-05-06 10:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4e5d3401cdf2/ Log: Refactored storage statistics to save memory when details are not required. diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -13,16 +13,16 @@ return a[0] < b[0] class StorageStatistics(object): - # Key: (operation_name, old_storage, new_storage) - # Value: [sizes] - stats = {} + modules = [] + using_classname = False - do_log = False - do_stats = False - do_stats_sizes = False + def add_module(self, module): + if module not in self.modules: + self.modules.append(module) + self.using_classname = self.using_classname or module.uses_classname def log(self, w_obj, operation, old_storage_object, log_classname): - if self.do_log or self.do_stats: + if len(self.modules) > 0: new_storage = w_obj.shadow.repr_classname if old_storage_object: old_storage = old_storage_object.repr_classname @@ -31,62 +31,101 @@ size = w_obj.size() key = self.make_key(operation, old_storage, new_storage) - if self.do_stats: - self.stat_operation(key, size) - if self.do_log: - if log_classname: - classname = w_obj.guess_classname() - else: - classname = None - 
self.log_operation(key, size, classname) + if self.using_classname and log_classname: + classname = w_obj.guess_classname() + else: + classname = None + for module in self.modules: + module.storage_operation(key, size, classname) def make_key(self, operation, old_storage, new_storage): return (operation, old_storage, new_storage) - - def stat_operation(self, key, size): - if not key in self.stats: - self.stats[key] = [] - self.stats[key].append(size) + + def print_results(self): + for module in self.modules: + module.print_results() - def log_operation(self, key, size, classname): - print self.log_operation_string(key, size, classname) - +class StatisticsModule(object): + uses_classname = False + def storage_operation(self, operation_key, storage_size, element_classname): + raise NotImplementedError("Abstract class") + def print_results(self): + raise NotImplementedError("Abstract class") def key_string(self, key): if key[1]: return "%s (%s -> %s)" % (key[0], key[1], key[2]) else: return "%s (%s)" % (key[0], key[2]) - - def log_operation_string(self, key, size, classname): - if classname: - return "%s of %s size %d" % (self.key_string(key), classname, size) + +class StatisticsLogger(StatisticsModule): + uses_classname = True + def storage_operation(self, operation_key, storage_size, element_classname): + print self.log_string(operation_key, storage_size, element_classname) + + def log_string(self, operation_key, storage_size, element_classname): + if element_classname: + return "%s of %s size %d" % (self.key_string(operation_key), element_classname, storage_size) else: - return "%s size %d" % (self.key_string(key), size) - + return "%s size %d" % (self.key_string(operation_key), storage_size) + + def print_results(self): + # Nothing to do, this is just for logging during runtime. 
+ pass + +class AbstractStatisticsCollector(StatisticsModule): + stats = {} + + def storage_operation(self, operation_key, storage_size, element_classname): + if not operation_key in self.stats: + self.stats[operation_key] = self.initial_value() + self.increment_value(self.stats[operation_key], storage_size) + def sorted_keys(self): keys = [ x for x in self.stats ] StatsSorter(keys).sort() return keys - - def print_stats(self): + +class StatisticsCollector(AbstractStatisticsCollector): + # Value: [total_size, num_operations] + def initial_value(self): return [0, 0] + def increment_value(self, value_object, storage_size): + value_object[0] = value_object[0] + storage_size + value_object[1] = value_object[1] + 1 + def print_results(self): + print "Storage Statistics:" for key in self.sorted_keys(): - sizes = self.stats[key] - sum = 0 - for s in sizes: sum += s - print "%s: %d times, avg size: %f" % (self.key_string(key), len(sizes), float(sum)/len(sizes)) - if self.do_stats_sizes: - print " All sizes: %s" % sizes + tuple = self.stats[key] + sum = tuple[0] + num = tuple[1] + print "\t%s: %d times, avg size: %f" % (self.key_string(key), num, float(sum)/num) + +class DetailedStatisticsCollector(AbstractStatisticsCollector): + # Value: list of numbers (sizes) + def initial_value(self): return [] + def increment_value(self, value_object, storage_size): + value_object.append(storage_size) + def print_results(self): + print "Detailed Storage Statistics:" + for key in self.sorted_keys(): + print "\t%s: s" % (self.key_string(key), self.stats[key]) + +# Static & global access to a StorageStatistics instance. 
_stats = StorageStatistics() +_logger = StatisticsLogger() +_collector = StatisticsCollector() +_detailedcollector = DetailedStatisticsCollector() -def activate_statistics(log=False, statistics=False, statstics_sizes=False): - _stats.do_log = _stats.do_log or log - _stats.do_stats = _stats.do_stats or statistics - _stats.do_stats_sizes = _stats.do_stats_sizes or statstics_sizes +def activate_statistics(log=False, statistics=False, detailed_statistics=False): + if log: + _stats.add_module(_logger) + if statistics: + _stats.add_module(_collector) + if detailed_statistics: + _stats.add_module(_detailedcollector) def print_statistics(): - if _stats.do_stats: - _stats.print_stats() + _stats.print_results() def log(w_obj, operation, old_storage=None, log_classname=True): _stats.log(w_obj, operation, old_storage, log_classname) diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -178,25 +178,27 @@ def test_statistics_stats(): stats = storage_statistics.StorageStatistics() - stats.stat_operation(stats.make_key("B", "old", "new"), 3) - stats.stat_operation(stats.make_key("B", "old", "new"), 4) - stats.stat_operation(stats.make_key("B", "old2", "new2"), 20) - stats.stat_operation(stats.make_key("B", "old", "new"), 5) - stats.stat_operation(stats.make_key("A", "old", "new"), 1) - stats.stat_operation(stats.make_key("A", "old", "new"), 2) - stats.stat_operation(stats.make_key("C", "old", "new"), 10) - stats.stat_operation(stats.make_key("C", "old", "new"), 11) - keys = stats.sorted_keys() + col = storage_statistics.DetailedStatisticsCollector() + col.storage_operation(stats.make_key("B", "old", "new"), 3, None) + col.storage_operation(stats.make_key("B", "old", "new"), 4, None) + col.storage_operation(stats.make_key("B", "old2", "new2"), 20, None) + col.storage_operation(stats.make_key("B", "old", "new"), 5, None) + col.storage_operation(stats.make_key("A", "old", "new"), 1, None) 
+ col.storage_operation(stats.make_key("A", "old", "new"), 2, None) + col.storage_operation(stats.make_key("C", "old", "new"), 10, None) + col.storage_operation(stats.make_key("C", "old", "new"), 11, None) + keys = col.sorted_keys() assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] - assert stats.stats[keys[0]] == [1, 2] - assert stats.stats[keys[1]] == [3, 4, 5] - assert stats.stats[keys[2]] == [20] - assert stats.stats[keys[3]] == [10, 11] + assert col.stats[keys[0]] == [1, 2] + assert col.stats[keys[1]] == [3, 4, 5] + assert col.stats[keys[2]] == [20] + assert col.stats[keys[3]] == [10, 11] def test_statistics_log(): stats = storage_statistics.StorageStatistics() - s = stats.log_operation_string(stats.make_key("Operation", "old_storage", "new_storage"), 22, "classname") + log = storage_statistics.StatisticsLogger() + s = log.log_string(stats.make_key("Operation", "old_storage", "new_storage"), 22, "classname") assert s == "Operation (old_storage -> new_storage) of classname size 22" - s = stats.log_operation_string(stats.make_key("InitialOperation", None, "some_new_storage"), 40, "a_classname") + s = log.log_string(stats.make_key("InitialOperation", None, "some_new_storage"), 40, "a_classname") assert s == "InitialOperation (some_new_storage) of a_classname size 40" \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -127,7 +127,7 @@ -p|--poll_events --strategy-log --strategy-stats - --strategy-stats-with-sizes + --strategy-stats-details [image path, default: Squeak.image] """ % argv[0] @@ -189,8 +189,8 @@ storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": storage_statistics.activate_statistics(statistics=True) - elif arg == "--strategy-stats-with-sizes": - storage_statistics.activate_statistics(statistics=True, statstics_sizes=True) + elif arg == 
"--strategy-stats-details": + storage_statistics.activate_statistics(statistics=True, detailed_statistics=True) elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Wed May 7 21:16:13 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added output of strategy statistics as dot-graphs Message-ID: <20140507191613.8A0CB1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r801:a799759b6a04 Date: 2014-05-06 13:23 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a799759b6a04/ Log: Added output of strategy statistics as dot-graphs diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -99,6 +99,72 @@ num = tuple[1] print "\t%s: %d times, avg size: %f" % (self.key_string(key), num, float(sum)/num) +class DotStatisticsCollector(StatisticsCollector): + incoming_operations = {} + incoming_elements = {} + outgoing_operations = {} + outgoing_elements = {} + def storage_operation(self, key, storage_size, element_classname): + StatisticsCollector.storage_operation(self, key, storage_size, element_classname) + source_type = key[1] + target_type = key[2] + self.fill_maps(self.incoming_operations, self.incoming_elements, target_type, storage_size) + if source_type: + self.fill_maps(self.outgoing_operations, self.outgoing_elements, source_type, storage_size) + + def fill_maps(self, operations_map, elements_map, key_type, size): + if key_type not in operations_map: + operations_map[key_type] = 0 + elements_map[key_type] = 0 + operations_map[key_type] = operations_map[key_type] + 1 + elements_map[key_type] = elements_map[key_type] + size + + def print_result(self): + print "Storage Statistics (dot format):" + print "================================" + print self.dot_string() + + def dot_string(self): + # Unfortunately, this is pretty 
complicated and messy... Sorry. + result = "digraph G {" + result += "loading_image [label=\"Image Loading\",shape=box];" + result += "created_object [label=\"Object Creation\",shape=box];" + for key in self.stats: + operation_type = key[0] + target_node = key[2] + elements = self.stats[key][0] + operations = self.stats[key][1] + label_suffix = "" + if operation_type == "Switched": + source_node = key[1] + percent_ops = float(operations) / float(self.incoming_operations[source_node]) * 100 + percent_elements = float(elements) / float(self.incoming_elements[source_node]) * 100 + label_suffix = "\n%0.2f%% objects\n%0.2f%% elements" % (percent_ops, percent_elements) + elif operation_type == "Initialized": + source_node = "created_object" + elif operation_type == "Filledin": + source_node = "loading_image" + else: + print "Could not handle storage operation %s" % operation_type + continue + result += "%s -> %s [label=\"%d (avg %0.2f)%s\"];" % (source_node, target_node, operations, float(elements)/float(operations), label_suffix) + for type in self.incoming_operations: + incoming_ops = self.incoming_operations[type] + incoming_els = self.incoming_elements[type] + label = "\nIncoming objects: %d" % incoming_ops + label += "\nIncoming elements: %d" % incoming_els + if type in self.outgoing_operations: + remaining_ops = incoming_ops - self.outgoing_operations[type] + remaining_els = incoming_els - self.outgoing_elements[type] + else: + remaining_ops = incoming_ops + remaining_els = incoming_els + label += "\nRemaining objects: %d (%0.2f%%)" % (remaining_ops, float(remaining_ops)/incoming_ops*100) + label += "\nRemaining elements: %d (%0.2f%%)" % (remaining_els, float(remaining_els)/incoming_els*100) + result += "%s [label=\"%s%s\"];" % (type, type, label) + result += "}" + return result + class DetailedStatisticsCollector(AbstractStatisticsCollector): # Value: list of numbers (sizes) def initial_value(self): return [] @@ -115,14 +181,17 @@ _logger = StatisticsLogger() 
_collector = StatisticsCollector() _detailedcollector = DetailedStatisticsCollector() +_dotcollector = DotStatisticsCollector() -def activate_statistics(log=False, statistics=False, detailed_statistics=False): +def activate_statistics(log=False, statistics=False, detailed_statistics=False, dot=False): if log: _stats.add_module(_logger) if statistics: _stats.add_module(_collector) if detailed_statistics: _stats.add_module(_detailedcollector) + if dot: + _stats.add_module(_dotcollector) def print_statistics(): _stats.print_results() diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -177,8 +177,8 @@ check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) def test_statistics_stats(): + col = storage_statistics.DetailedStatisticsCollector() stats = storage_statistics.StorageStatistics() - col = storage_statistics.DetailedStatisticsCollector() col.storage_operation(stats.make_key("B", "old", "new"), 3, None) col.storage_operation(stats.make_key("B", "old", "new"), 4, None) col.storage_operation(stats.make_key("B", "old2", "new2"), 20, None) @@ -201,4 +201,22 @@ assert s == "Operation (old_storage -> new_storage) of classname size 22" s = log.log_string(stats.make_key("InitialOperation", None, "some_new_storage"), 40, "a_classname") assert s == "InitialOperation (some_new_storage) of a_classname size 40" + +def test_statistics_stats_dot(): + col = storage_statistics.DotStatisticsCollector() + stats = storage_statistics.StorageStatistics() + + col.storage_operation(stats.make_key("Switched", "old", "new"), 10, None) + col.storage_operation(stats.make_key("Switched", "old", "new"), 10, None) + col.storage_operation(stats.make_key("Switched", "new", "new2"), 10, None) + col.storage_operation(stats.make_key("Switched", "old2", "new"), 5, None) + col.storage_operation(stats.make_key("Initialized", None, "old"), 13, None) + col.storage_operation(stats.make_key("Initialized", None, "old"), 
10, None) + col.storage_operation(stats.make_key("Initialized", None, "old"), 10, None) + col.storage_operation(stats.make_key("Initialized", None, "old2"), 15, None) + col.storage_operation(stats.make_key("Filledin", None, "old2"), 20, None) + col.storage_operation(stats.make_key("Filledin", None, "new"), 10, None) + col.storage_operation(stats.make_key("Filledin", None, "new"), 11, None) + + assert col.dot_string() == "" \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -189,6 +189,8 @@ storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": storage_statistics.activate_statistics(statistics=True) + elif arg == "--strategy-stats-dot": + storage_statistics.activate_statistics(dot=True) elif arg == "--strategy-stats-details": storage_statistics.activate_statistics(statistics=True, detailed_statistics=True) elif path is None: From noreply at buildbot.pypy.org Wed May 7 21:16:14 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:14 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added new parameter to help string. Message-ID: <20140507191614.8CF7A1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r802:88893a3ced56 Date: 2014-05-06 13:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/88893a3ced56/ Log: Added new parameter to help string. 
diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -127,6 +127,7 @@ -p|--poll_events --strategy-log --strategy-stats + --strategy-stats-dot --strategy-stats-details [image path, default: Squeak.image] """ % argv[0] From noreply at buildbot.pypy.org Wed May 7 21:16:15 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:15 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed storage statistics, made compile under RPython. Message-ID: <20140507191615.907391C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r803:0f7c80f95960 Date: 2014-05-06 18:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0f7c80f95960/ Log: Fixed storage statistics, made compile under RPython. diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -119,13 +119,16 @@ operations_map[key_type] = operations_map[key_type] + 1 elements_map[key_type] = elements_map[key_type] + size - def print_result(self): + def print_results(self): print "Storage Statistics (dot format):" print "================================" + print "*/" # End the commend started in activate_statistics() print self.dot_string() def dot_string(self): - # Unfortunately, this is pretty complicated and messy... Sorry. + # Return a string that is valid dot code and can be parsed by the graphviz dot utility. + # Unfortunately, this is pretty complicated and messy... Sorry. 
+ result = "digraph G {" result += "loading_image [label=\"Image Loading\",shape=box];" result += "created_object [label=\"Object Creation\",shape=box];" @@ -139,7 +142,7 @@ source_node = key[1] percent_ops = float(operations) / float(self.incoming_operations[source_node]) * 100 percent_elements = float(elements) / float(self.incoming_elements[source_node]) * 100 - label_suffix = "\n%0.2f%% objects\n%0.2f%% elements" % (percent_ops, percent_elements) + label_suffix = "\n%d%% objects\n%d%% elements" % (int(percent_ops), int(percent_elements)) elif operation_type == "Initialized": source_node = "created_object" elif operation_type == "Filledin": @@ -147,7 +150,7 @@ else: print "Could not handle storage operation %s" % operation_type continue - result += "%s -> %s [label=\"%d (avg %0.2f)%s\"];" % (source_node, target_node, operations, float(elements)/float(operations), label_suffix) + result += "%s -> %s [label=\"%d objects\n%d elements per object%s\"];" % (source_node, target_node, operations, elements/operations, label_suffix) for type in self.incoming_operations: incoming_ops = self.incoming_operations[type] incoming_els = self.incoming_elements[type] @@ -159,8 +162,10 @@ else: remaining_ops = incoming_ops remaining_els = incoming_els - label += "\nRemaining objects: %d (%0.2f%%)" % (remaining_ops, float(remaining_ops)/incoming_ops*100) - label += "\nRemaining elements: %d (%0.2f%%)" % (remaining_els, float(remaining_els)/incoming_els*100) + percent_remaining_ops = float(remaining_ops) / incoming_ops * 100 + percent_remaining_els = float(remaining_els) / incoming_els * 100 + label += "\nRemaining objects: %d (%d%%)" % (remaining_ops, int(percent_remaining_ops)) + label += "\nRemaining elements: %d (%d%%)" % (remaining_els, int(percent_remaining_els)) result += "%s [label=\"%s%s\"];" % (type, type, label) result += "}" return result @@ -192,6 +197,8 @@ _stats.add_module(_detailedcollector) if dot: _stats.add_module(_dotcollector) + # Start a comment in order to make 
the entire output valid dot code. Hack. + print "/*" def print_statistics(): _stats.print_results() From noreply at buildbot.pypy.org Wed May 7 21:16:16 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:16 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Small improvements in interpreter.py Message-ID: <20140507191616.977161C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r804:f70b00c04fab Date: 2014-05-07 14:27 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f70b00c04fab/ Log: Small improvements in interpreter.py diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -60,19 +60,6 @@ self.trace = trace self.trace_proxy = False - def interpret_with_w_frame(self, w_frame): - try: - self.loop(w_frame) - except ReturnFromTopLevel, e: - return e.object - - def should_trace(self, primitives=False): - if objectmodel.we_are_translated() or conftest.option is None: - return False - if not primitives: - return conftest.option.bc_trace - return conftest.option.prim_trace - def loop(self, w_active_context): # just a trampoline for the actual loop implemented in c_loop self._loop = True @@ -108,8 +95,7 @@ pc = s_context.pc() if pc < old_pc: if jit.we_are_jitted(): - self.quick_check_for_interrupt(s_context, - dec=self._get_adapted_tick_counter()) + self.jitted_check_for_interrupt(s_context) self.jit_driver.can_enter_jit( pc=pc, self=self, method=method, s_context=s_context) @@ -128,14 +114,6 @@ else: s_context.push(nlr.value) - def _get_adapted_tick_counter(self): - # Normally, the tick counter is decremented by 1 for every message send. - # Since we don't know how many messages are called during this trace, we - # just decrement by 100th of the trace length (num of bytecodes). 
- trace_length = jit.current_trace_length() - decr_by = int(trace_length // 100) - return max(decr_by, 1) - def stack_frame(self, s_new_frame, may_context_switch=True): if not self._loop: return s_new_frame # this test is done to not loop in test, @@ -150,30 +128,17 @@ self.remaining_stack_depth += 1 return retval - def perform(self, w_receiver, selector, *arguments_w): - if isinstance(selector, str): - if selector == "asSymbol": - w_selector = self.image.w_asSymbol - else: - w_selector = self.perform(self.space.wrap_string(selector), - "asSymbol") - else: - w_selector = selector - - w_method = model.W_CompiledMethod(self.space, header=512) - w_method.literalatput0(self.space, 1, w_selector) - assert len(arguments_w) <= 7 - w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod - s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) - s_frame.push(w_receiver) - s_frame.push_all(list(arguments_w)) - - self.interrupt_check_counter = self.interrupt_counter_size - try: - self.loop(s_frame.w_self()) - except ReturnFromTopLevel, e: - return e.object - + # ============== Methods for handling user interrupts ============== + + def jitted_check_for_interrupt(self, s_frame): + # Normally, the tick counter is decremented by 1 for every message send. + # Since we don't know how many messages are called during this trace, we + # just decrement by 100th of the trace length (num of bytecodes). 
+ trace_length = jit.current_trace_length() + decr_by = int(trace_length // 100) + decr_by = max(decr_by, 1) + self.quick_check_for_interrupt(s_frame, decr_by) + def quick_check_for_interrupt(self, s_frame, dec=1): self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: @@ -206,9 +171,47 @@ from rpython.rlib.rarithmetic import intmask return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) - def padding(self, symbol=' '): + # ============== Methods for the tracing functionality ============== + + def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) + + def should_trace(self, primitives=False): + if objectmodel.we_are_translated() or conftest.option is None: + return False + if not primitives: + return conftest.option.bc_trace + return conftest.option.prim_trace + + # ============== Convenience methods for executing code ============== + + def interpret_toplevel(self, w_frame): + try: + self.loop(w_frame) + except ReturnFromTopLevel, e: + return e.object + + def perform(self, w_receiver, selector, *arguments_w): + if isinstance(selector, str): + if selector == "asSymbol": + w_selector = self.image.w_asSymbol + else: + w_selector = self.perform(self.space.wrap_string(selector), + "asSymbol") + else: + w_selector = selector + w_method = model.W_CompiledMethod(self.space, header=512) + w_method.literalatput0(self.space, 1, w_selector) + assert len(arguments_w) <= 7 + w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod + s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) + s_frame.push(w_receiver) + s_frame.push_all(list(arguments_w)) + + self.interrupt_check_counter = self.interrupt_counter_size + return self.interpret_toplevel(s_frame.w_self()) + class ReturnFromTopLevel(Exception): _attrs_ = ["object"] def __init__(self, object): diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- 
a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -461,7 +461,7 @@ s_frame.w_method().setliterals(literals) s_frame.push(w_object) s_frame.push(space.wrap_int(8)) - result = interp.interpret_with_w_frame(w_frame) + result = interp.interpret_toplevel(w_frame) assert space.unwrap_int(result) == 34 def test_send_to_primitive(): @@ -701,7 +701,7 @@ bcode = "".join([chr(x) for x in bcodes]) w_frame, s_frame = new_frame(bcode, receiver=receiver) s_frame.w_method().setliterals(literals) - return interp.interpret_with_w_frame(w_frame) + return interp.interpret_toplevel(w_frame) # tests: bytecodePrimValue & bytecodePrimValueWithArg def test_bc_3_plus_4(): diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -214,7 +214,7 @@ ap.store_suspended_context(space.w_nil) interp = interpreter.Interpreter(space) - interp.interpret_with_w_frame(w_ctx) + interp.interpret_toplevel(w_ctx) def test_compile_method(): sourcecode = """fib diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -62,7 +62,7 @@ assert isinstance(w_ctx, model.W_PointersObject) ap.store_suspended_context(space.w_nil) try: - return interp.interpret_with_w_frame(w_ctx) + return interp.interpret_toplevel(w_ctx) except error.Exit, e: print e.msg From noreply at buildbot.pypy.org Wed May 7 21:16:17 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:17 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed unused variables, some more consistency fixes in interpreter.py Message-ID: <20140507191617.961301C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r805:b3fb10ed48fe Date: 2014-05-07 14:47 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b3fb10ed48fe/ Log: Removed unused variables, some more consistency fixes in 
interpreter.py diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -26,8 +26,7 @@ _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", "startup_time", "evented"] - _w_last_active_context = None - _last_indent = "" + jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], reds=['s_context'], @@ -82,7 +81,7 @@ s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: - if self.trace: + if self.tracing(): print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) s_new_context = p.s_new_context @@ -128,17 +127,17 @@ self.remaining_stack_depth += 1 return retval - # ============== Methods for handling user interrupts ============== - - def jitted_check_for_interrupt(self, s_frame): - # Normally, the tick counter is decremented by 1 for every message send. + # ============== Methods for handling user interrupts ============== + + def jitted_check_for_interrupt(self, s_frame): + # Normally, the tick counter is decremented by 1 for every message send. # Since we don't know how many messages are called during this trace, we # just decrement by 100th of the trace length (num of bytecodes). 
trace_length = jit.current_trace_length() decr_by = int(trace_length // 100) decr_by = max(decr_by, 1) - self.quick_check_for_interrupt(s_frame, decr_by) - + self.quick_check_for_interrupt(s_frame, decr_by) + def quick_check_for_interrupt(self, s_frame, dec=1): self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: @@ -171,26 +170,29 @@ from rpython.rlib.rarithmetic import intmask return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) - # ============== Methods for the tracing functionality ============== - - def padding(self, symbol=' '): + # ============== Methods for the tracing functionality ============== + + def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) - - def should_trace(self, primitives=False): + + def tracing(self, check_conftest=False, primitives=False): + if not check_conftest: + return self.trace if objectmodel.we_are_translated() or conftest.option is None: return False - if not primitives: + if primitivies: + return conftest.option.prim_trace + else: return conftest.option.bc_trace - return conftest.option.prim_trace - - # ============== Convenience methods for executing code ============== - - def interpret_toplevel(self, w_frame): + + # ============== Convenience methods for executing code ============== + + def interpret_toplevel(self, w_frame): try: self.loop(w_frame) except ReturnFromTopLevel, e: return e.object - + def perform(self, w_receiver, selector, *arguments_w): if isinstance(selector, str): if selector == "asSymbol": @@ -210,8 +212,8 @@ s_frame.push_all(list(arguments_w)) self.interrupt_check_counter = self.interrupt_counter_size - return self.interpret_toplevel(s_frame.w_self()) - + return self.interpret_toplevel(s_frame.w_self()) + class ReturnFromTopLevel(Exception): _attrs_ = ["object"] def __init__(self, object): @@ -356,9 +358,9 @@ def _sendSelector(self, w_selector, argcount, interp, receiver, receiverclassshadow): - if 
interp.should_trace(): + if interp.tracing(check_conftest=True): print "%sSending selector #%s to %r with: %r" % ( - interp._last_indent, w_selector.str_content(), receiver, + interp.padding(), w_selector.str_content(), receiver, [self.peek(argcount-1-i) for i in range(argcount)]) assert argcount >= 0 @@ -378,7 +380,7 @@ self.pop() # receiver # ###################################################################### - if interp.trace: + if interp.tracing(): print interp.padding() + s_frame.short_str() return interp.stack_frame(s_frame) @@ -403,7 +405,7 @@ self.pop() # ###################################################################### - if interp.trace: + if interp.tracing(): print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.str_content()) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -412,23 +414,20 @@ def _call_primitive(self, code, interp, argcount, w_method, w_selector): # the primitive pushes the result (if any) onto the stack itself - if interp.should_trace(): - print "%sActually calling primitive %d" % (interp._last_indent, code,) + if interp.tracing(check_conftest=True): + print "%sActually calling primitive %d" % (interp.padding(), code,) func = primitives.prim_holder.prim_table[code] # ################################################################## - if interp.trace: + if interp.tracing(): print "%s-> primitive %d \t(in %s, named #%s)" % ( - ' ' * (interp.max_stack_depth - interp.remaining_stack_depth), - code, self.w_method().get_identifier_string(), w_selector.str_content()) + interp.padding(), code, self.w_method().get_identifier_string(), w_selector.str_content()) try: # note: argcount does not include rcvr return func(interp, self, argcount, w_method) except primitives.PrimitiveFailedError, e: - if interp.trace: - print "%s primitive FAILED" % ( - ' ' * (interp.max_stack_depth - interp.remaining_stack_depth),) - - if interp.should_trace(True): + if interp.tracing(): + print "%s primitive 
FAILED" % interp.padding() + if interp.tracing(check_conftest=True, primitives=True): print "PRIMITIVE FAILED: %d #%s" % (w_method.primitive, w_selector.str_content()) raise e @@ -440,7 +439,7 @@ # unfortunately, the assert below is not true for some tests # assert self._stack_ptr == self.tempsize() - if interp.trace: + if interp.tracing(): print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) raise Return(return_value, s_return_to) From noreply at buildbot.pypy.org Wed May 7 21:16:18 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:18 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Moved the _loop flag of Interpreter into a separate Subclass TestInterpreter used in tests. Message-ID: <20140507191618.9223E1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r806:1f802c5946b7 Date: 2014-05-07 15:10 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1f802c5946b7/ Log: Moved the _loop flag of Interpreter into a separate Subclass TestInterpreter used in tests. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -38,6 +38,8 @@ evented=True, max_stack_depth=constants.MAX_LOOP_DEPTH): import time + + # === Initialize immutable variables self.space = space self.image = image self.image_name = image_name @@ -46,22 +48,21 @@ else: self.startup_time = constants.CompileTime self.max_stack_depth = max_stack_depth - self.remaining_stack_depth = max_stack_depth - self._loop = False - self.next_wakeup_tick = 0 self.evented = evented try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE + + # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - # ###################################################################### + self.remaining_stack_depth = max_stack_depth + self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False def loop(self, w_active_context): # just a trampoline for the actual loop implemented in c_loop - self._loop = True s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.remaining_stack_depth == self.max_stack_depth @@ -114,9 +115,6 @@ s_context.push(nlr.value) def stack_frame(self, s_new_frame, may_context_switch=True): - if not self._loop: - return s_new_frame # this test is done to not loop in test, - # but rather step just once where wanted if self.remaining_stack_depth <= 1: raise StackOverflow(s_new_frame) @@ -180,7 +178,7 @@ return self.trace if objectmodel.we_are_translated() or conftest.option is None: return False - if primitivies: + if primitives: return conftest.option.prim_trace else: return conftest.option.bc_trace diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,6 +1,6 @@ import py, operator, sys from spyvm import model, interpreter, primitives, shadow, 
objspace, wrapper, constants -from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes +from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes, TestInterpreter from spyvm.wrapper import PointWrapper from spyvm.conftest import option @@ -985,7 +985,7 @@ # ifTrue: [ 0 ] # ifFalse: [ (testBlock value: aNumber - 1) + aNumber ]]. # ^ testBlock value: 11 - interp = interpreter.Interpreter(space, max_stack_depth=3) + interp = TestInterpreter(space, max_stack_depth=3) #create a method with the correct bytecodes and a literal bytes = reduce(operator.add, map(chr, [0x8a, 0x01, 0x68, 0x10, 0x8f, 0x11, 0x00, 0x11, 0x10, 0x75, 0xb6, 0x9a, 0x75, 0xa4, 0x09, 0x8c, 0x00, 0x01, @@ -1007,7 +1007,7 @@ except interpreter.StackOverflow, e: assert False try: - interp = interpreter.Interpreter(space, None, "", max_stack_depth=10) + interp = TestInterpreter(space, image_name="", max_stack_depth=10) interp._loop = True interp.c_loop(w_method.create_frame(space, space.wrap_int(0), [])) except interpreter.StackOverflow, e: @@ -1015,7 +1015,7 @@ except interpreter.ReturnFromTopLevel, e: assert False -class StackTestInterpreter(interpreter.Interpreter): +class StackTestInterpreter(TestInterpreter): def stack_frame(self, w_frame, may_interrupt=True): stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,6 +1,6 @@ import py, math from spyvm import squeakimage, model, constants, interpreter, shadow, objspace, wrapper, primitives -from .util import read_image, open_reader, copy_to_module, cleanup_module +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter def setup_module(): space, interp, image, reader = read_image("mini.image") @@ -213,7 +213,7 @@ w_ctx = ap.suspended_context() 
ap.store_suspended_context(space.w_nil) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) interp.interpret_toplevel(w_ctx) def test_compile_method(): @@ -355,7 +355,7 @@ s_ctx = w_ctx.as_context_get_shadow(space) ap.store_suspended_context(space.w_nil) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) assert isinstance(s_ctx, shadow.MethodContextShadow) assert s_ctx.top().is_same_object(space.w_true) interp.step(s_ctx) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,7 +5,7 @@ from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space, copy_to_module, cleanup_module +from .util import create_space, copy_to_module, cleanup_module, TestInterpreter from .test_interpreter import _new_frame def setup_module(): @@ -47,7 +47,7 @@ frame = context for i in range(len(stack)): frame.as_context_get_shadow(space).push(stack[i]) - interp = interpreter.Interpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space, image_name=IMAGENAME) return interp, frame, len(stack) def _prim(space, code, stack, context = None): @@ -595,7 +595,7 @@ closure = space.newClosure(w_frame, 4, #pc size_arguments, copiedValues) s_initial_context.push_all([closure] + args) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) s_active_context = prim_table[primitives.CLOSURE_VALUE + size_arguments](interp, s_initial_context, size_arguments) return s_initial_context, closure, s_active_context @@ -639,7 +639,7 @@ w_frame, s_context = new_frame("") s_context.push(space.w_Array) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) prim_table[primitives.SOME_INSTANCE](interp, s_context, 0) w_1 = s_context.pop() assert w_1.getclass(space) is space.w_Array @@ -655,7 +655,7 
@@ w_frame, s_context = new_frame("") s_context.push(space.w_Array) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) w_1 = someInstances[0] assert w_1.getclass(space) is space.w_Array @@ -680,7 +680,7 @@ closure = space.newClosure(w_frame, 4, 0, []) s_frame = w_frame.as_methodcontext_get_shadow(space) - interp = interpreter.Interpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space, image_name=IMAGENAME) interp._loop = True try: diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -17,7 +17,7 @@ reader.initialize() image = squeakimage.SqueakImage() image.from_reader(space, reader) - interp = interpreter.Interpreter(space, image) + interp = TestInterpreter(space, image) return space, interp, image, reader def create_space(bootstrap = bootstrap_by_default): @@ -28,7 +28,7 @@ def create_space_interp(bootstrap = bootstrap_by_default): space = create_space(bootstrap) - interp = interpreter.Interpreter(space) + interp = TestInterpreter(space) return space, interp def find_symbol_in_methoddict_of(string, s_class): @@ -72,6 +72,21 @@ else: make_getter(entry) +# This interpreter allows fine grained control of the interpretation +# by manually stepping through the bytecodes, if _loop is set to False. 
+class TestInterpreter(interpreter.Interpreter): + _loop = False + + def loop(self, w_active_context): + self._loop = True + return interpreter.Interpreter.loop(self, w_active_context) + + def stack_frame(self, s_new_frame, may_context_switch=True): + if not self._loop: + return s_new_frame # this test is done to not loop in test, + # but rather step just once where wanted + return interpreter.Interpreter.stack_frame(self, s_new_frame, may_context_switch) + class BootstrappedObjSpace(objspace.ObjSpace): def bootstrap(self): From noreply at buildbot.pypy.org Wed May 7 21:16:19 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed initialization of strategy statistics and the test. Message-ID: <20140507191619.8D10E1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r807:d6c359685152 Date: 2014-05-07 15:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d6c359685152/ Log: Fixed initialization of strategy statistics and the test. 
diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -13,8 +13,10 @@ return a[0] < b[0] class StorageStatistics(object): - modules = [] - using_classname = False + + def __init__(self): + self.modules = [] + self.using_classname = False def add_module(self, module): if module not in self.modules: @@ -73,7 +75,9 @@ pass class AbstractStatisticsCollector(StatisticsModule): - stats = {} + + def __init__(self): + self.stats = {} def storage_operation(self, operation_key, storage_size, element_classname): if not operation_key in self.stats: @@ -100,10 +104,14 @@ print "\t%s: %d times, avg size: %f" % (self.key_string(key), num, float(sum)/num) class DotStatisticsCollector(StatisticsCollector): - incoming_operations = {} - incoming_elements = {} - outgoing_operations = {} - outgoing_elements = {} + + def __init__(self): + AbstractStatisticsCollector.__init__(self) + self.incoming_operations = {} + self.incoming_elements = {} + self.outgoing_operations = {} + self.outgoing_elements = {} + def storage_operation(self, key, storage_size, element_classname): StatisticsCollector.storage_operation(self, key, storage_size, element_classname) source_type = key[1] diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -218,5 +218,7 @@ col.storage_operation(stats.make_key("Filledin", None, "new"), 10, None) col.storage_operation(stats.make_key("Filledin", None, "new"), 11, None) - assert col.dot_string() == "" + # The dot-code is correct, I checked ;) + assert col.dot_string() == \ + 'digraph G {loading_image [label="Image Loading",shape=box];created_object [label="Object Creation",shape=box];created_object -> old2 [label="1 objects\n15 elements per object"];loading_image -> new [label="2 objects\n10 elements per object"];old -> new [label="2 objects\n10 elements per object\n66% objects\n60% 
elements"];loading_image -> old2 [label="1 objects\n20 elements per object"];created_object -> old [label="3 objects\n11 elements per object"];old2 -> new [label="1 objects\n5 elements per object\n50% objects\n14% elements"];new -> new2 [label="1 objects\n10 elements per object\n20% objects\n21% elements"];new2 [label="new2\nIncoming objects: 1\nIncoming elements: 10\nRemaining objects: 1 (100%)\nRemaining elements: 10 (100%)"];new [label="new\nIncoming objects: 5\nIncoming elements: 46\nRemaining objects: 4 (80%)\nRemaining elements: 36 (78%)"];old2 [label="old2\nIncoming objects: 2\nIncoming elements: 35\nRemaining objects: 1 (50%)\nRemaining elements: 30 (85%)"];old [label="old\nIncoming objects: 3\nIncoming elements: 33\nRemaining objects: 1 (33%)\nRemaining elements: 13 (39%)"];}' \ No newline at end of file From noreply at buildbot.pypy.org Wed May 7 21:16:20 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Renamed c_loop to loop_bytecodes. Added Exception as documentation. Message-ID: <20140507191620.877D11C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r808:550297e474ff Date: 2014-05-07 15:43 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/550297e474ff/ Log: Renamed c_loop to loop_bytecodes. Added Exception as documentation. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -62,14 +62,15 @@ self.trace_proxy = False def loop(self, w_active_context): - # just a trampoline for the actual loop implemented in c_loop + # just a trampoline for the actual loop implemented in loop_bytecodes s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.remaining_stack_depth == self.max_stack_depth - # Need to save s_sender, c_loop will nil this on return + # Need to save s_sender, loop_bytecodes will nil this on return s_sender = s_new_context.s_sender() try: - s_new_context = self.c_loop(s_new_context) + self.loop_bytecodes(s_new_context) + raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: s_new_context = e.s_context except Return, nlr: @@ -86,7 +87,7 @@ print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) s_new_context = p.s_new_context - def c_loop(self, s_context, may_context_switch=True): + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) @@ -113,17 +114,17 @@ raise nlr else: s_context.push(nlr.value) - + + # This is just a wrapper around loop_bytecodes that handles the remaining_stack_depth mechanism def stack_frame(self, s_new_frame, may_context_switch=True): if self.remaining_stack_depth <= 1: raise StackOverflow(s_new_frame) self.remaining_stack_depth -= 1 try: - retval = self.c_loop(s_new_frame, may_context_switch) + self.loop_bytecodes(s_new_frame, may_context_switch) finally: self.remaining_stack_depth += 1 - return retval # ============== Methods for handling user interrupts ============== diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1009,7 +1009,7 @@ try: interp = 
TestInterpreter(space, image_name="", max_stack_depth=10) interp._loop = True - interp.c_loop(w_method.create_frame(space, space.wrap_int(0), [])) + interp.loop_bytecodes(w_method.create_frame(space, space.wrap_int(0), [])) except interpreter.StackOverflow, e: assert isinstance(e.s_context, shadow.MethodContextShadow) except interpreter.ReturnFromTopLevel, e: @@ -1019,7 +1019,7 @@ def stack_frame(self, w_frame, may_interrupt=True): stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): - assert sys._getframe(4 + i * 6).f_code.co_name == 'c_loop' + assert sys._getframe(4 + i * 6).f_code.co_name == 'loop_bytecodes' assert sys._getframe(5 + stack_depth * 6).f_code.co_name == 'loop' return interpreter.Interpreter.stack_frame(self, w_frame) From noreply at buildbot.pypy.org Wed May 7 21:16:21 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Changed the receiver of top-level methods (executed with -r) to nil (like a DoIt in the workspace). Message-ID: <20140507191621.84AC31C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r809:6b8d21d604c3 Date: 2014-05-07 16:43 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6b8d21d604c3/ Log: Changed the receiver of top-level methods (executed with -r) to nil (like a DoIt in the workspace). 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -108,7 +108,7 @@ self.step(s_context) except Return, nlr: if nlr.s_target_context is not s_context: - if not s_context.is_closure_context() and s_context.w_method().primitive() == 198: + if not s_context.is_closure_context() and method.primitive() == 198: s_context.activate_unwind_context(self) s_context.mark_returned() raise nlr @@ -201,18 +201,19 @@ "asSymbol") else: w_selector = selector - + w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(arguments_w) <= 7 w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod + w_method.set_lookup_class_and_name(w_receiver.getclass(self.space), "Interpreter.perform") s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) s_frame.push(w_receiver) s_frame.push_all(list(arguments_w)) - + self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - + class ReturnFromTopLevel(Exception): _attrs_ = ["object"] def __init__(self, object): diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -52,7 +52,7 @@ s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) w_frame = s_frame.w_self() def interp_execute_frame(): - return interp.loop(w_frame) + return interp.interpret_toplevel(w_frame) return interp_execute_frame # ==== The following will pre-load images and build a jit based on methods from the entry-point module diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -68,26 +68,29 @@ def _run_code(interp, code, as_benchmark=False): import time - selector = "codeTest%d" % int(time.time()) + selector = "DoIt%d" % int(time.time()) space = interp.space + w_receiver = space.w_nil + 
w_receiver_class = w_receiver.getclass(space) try: w_result = interp.perform( - space.w_SmallInteger, + w_receiver_class, "compile:classified:notifying:", space.wrap_string("%s\r\n%s" % (selector, code)), space.wrap_string("spy-run-code"), space.w_nil ) + w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() except interpreter.ReturnFromTopLevel, e: print e.object return 1 except error.Exit, e: print e.msg return 1 - + if not as_benchmark: try: - w_result = interp.perform(space.wrap_int(0), selector) + w_result = interp.perform(w_receiver, selector) except interpreter.ReturnFromTopLevel, e: print e.object return 1 From noreply at buildbot.pypy.org Wed May 7 21:16:22 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 7 May 2014 21:16:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Implemented mustBeBoolean when sending a conditional jump message to a non-bool object. Message-ID: <20140507191622.D54EE1C340B@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r810:e958ff7ec197 Date: 2014-05-07 21:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e958ff7ec197/ Log: Implemented mustBeBoolean when sending a conditional jump message to a non-bool object. Added test, extended the test image. Cleaned up tracing functionality in interpreter. 
diff --git a/images/running-something-mini.image b/images/running-something-mini.image index ac12f8bf556e24ceb6046e03ef26d92e1e1201bf..e121d1f56f155c8c7184bb3dde82edcdbcc44d9a GIT binary patch [cut] diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -141,6 +141,7 @@ "smalltalkdict" : SO_SMALLTALK, "display" : SO_DISPLAY_OBJECT, "doesNotUnderstand" : SO_DOES_NOT_UNDERSTAND, + "mustBeBoolean" : SO_MUST_BE_BOOLEAN, "interrupt_semaphore" : SO_USER_INTERRUPT_SEMAPHORE, "timerSemaphore" : SO_TIMER_SEMAPHORE, } diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -83,7 +83,7 @@ s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: - if self.tracing(): + if self.trace: print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) s_new_context = p.s_new_context @@ -169,21 +169,6 @@ from rpython.rlib.rarithmetic import intmask return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) - # ============== Methods for the tracing functionality ============== - - def padding(self, symbol=' '): - return symbol * (self.max_stack_depth - self.remaining_stack_depth) - - def tracing(self, check_conftest=False, primitives=False): - if not check_conftest: - return self.trace - if objectmodel.we_are_translated() or conftest.option is None: - return False - if primitives: - return conftest.option.prim_trace - else: - return conftest.option.bc_trace - # ============== Convenience methods for executing code ============== def interpret_toplevel(self, w_frame): @@ -191,7 +176,7 @@ self.loop(w_frame) except ReturnFromTopLevel, e: return e.object - + def perform(self, w_receiver, selector, *arguments_w): if isinstance(selector, str): if selector == "asSymbol": @@ -214,6 +199,9 @@ self.interrupt_check_counter = self.interrupt_counter_size return 
self.interpret_toplevel(s_frame.w_self()) + def padding(self, symbol=' '): + return symbol * (self.max_stack_depth - self.remaining_stack_depth) + class ReturnFromTopLevel(Exception): _attrs_ = ["object"] def __init__(self, object): @@ -358,17 +346,12 @@ def _sendSelector(self, w_selector, argcount, interp, receiver, receiverclassshadow): - if interp.tracing(check_conftest=True): - print "%sSending selector #%s to %r with: %r" % ( - interp.padding(), w_selector.str_content(), receiver, - [self.peek(argcount-1-i) for i in range(argcount)]) assert argcount >= 0 - try: w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - + code = w_method.primitive() if code: try: @@ -380,11 +363,25 @@ self.pop() # receiver # ###################################################################### - if interp.tracing(): + if interp.trace: print interp.padding() + s_frame.short_str() return interp.stack_frame(s_frame) + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): + w_special_selector = self.space.objtable["w_" + special_selector] + s_class = receiver.class_shadow(self.space) + w_method = s_class.lookup(w_special_selector) + s_frame = w_method.create_frame(interp.space, receiver, w_args, self) + + # ###################################################################### + if interp.trace: + print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) + if not objectmodel.we_are_translated(): + import pdb; pdb.set_trace() + + return interp.stack_frame(s_frame) + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): arguments = self.pop_and_return_n(argcount) w_message_class = self.space.classtable["w_Message"] @@ -393,45 +390,36 @@ w_message = s_message_class.new() w_message.store(self.space, 0, w_selector) w_message.store(self.space, 1, self.space.wrap_list(arguments)) - s_class = receiver.class_shadow(self.space) + 
self.pop() # The receiver, already known. + try: - w_method = s_class.lookup(self.space.objtable["w_doesNotUnderstand"]) + return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except MethodNotFound: from spyvm.shadow import ClassShadow + s_class = receiver.class_shadow(self.space) assert isinstance(s_class, ClassShadow) - print "Missing doesDoesNotUnderstand in hierarchy of %s" % s_class.getname() + print "Missing doesNotUnderstand in hierarchy of %s" % s_class.getname() raise - s_frame = w_method.create_frame(interp.space, receiver, [w_message], self) - self.pop() - - # ###################################################################### - if interp.tracing(): - print '%s%s missing: #%s' % (interp.padding('#'), s_frame.short_str(), w_selector.str_content()) - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() - - return interp.stack_frame(s_frame) - + + def _mustBeBoolean(self, interp, receiver): + return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") + def _call_primitive(self, code, interp, argcount, w_method, w_selector): - # the primitive pushes the result (if any) onto the stack itself - if interp.tracing(check_conftest=True): - print "%sActually calling primitive %d" % (interp.padding(), code,) - func = primitives.prim_holder.prim_table[code] # ################################################################## - if interp.tracing(): + if interp.trace: print "%s-> primitive %d \t(in %s, named #%s)" % ( interp.padding(), code, self.w_method().get_identifier_string(), w_selector.str_content()) + func = primitives.prim_holder.prim_table[code] try: # note: argcount does not include rcvr + # the primitive pushes the result (if any) onto the stack itself return func(interp, self, argcount, w_method) except primitives.PrimitiveFailedError, e: - if interp.tracing(): - print "%s primitive FAILED" % interp.padding() - if interp.tracing(check_conftest=True, primitives=True): - print "PRIMITIVE FAILED: %d 
#%s" % (w_method.primitive, w_selector.str_content()) + if interp.trace: + print "%s primitive %d FAILED\t (in %s, named %s)" % ( + interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e - def _return(self, return_value, interp, s_return_to): # for tests, when returning from the top-level context if s_return_to is None: @@ -439,7 +427,7 @@ # unfortunately, the assert below is not true for some tests # assert self._stack_ptr == self.tempsize() - if interp.tracing(): + if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) raise Return(return_value, s_return_to) @@ -642,10 +630,20 @@ def jump(self, offset): self.store_pc(self.pc() + offset) - def jumpConditional(self,bool,position): - if self.top() == bool: # XXX this seems wrong? + def jumpConditional(self, interp, expecting_true, position): + if expecting_true: + w_expected = interp.space.w_true + w_alternative = interp.space.w_false + else: + w_alternative = interp.space.w_true + w_expected = interp.space.w_false + + # Don't check the class, just compare with only two instances. 
+ w_bool = self.pop() + if w_expected.is_same_object(w_bool): self.jump(position) - self.pop() + elif not w_alternative.is_same_object(w_bool): + self._mustBeBoolean(interp, w_bool) def shortJumpPosition(self, current_bytecode): return (current_bytecode & 7) + 1 @@ -654,8 +652,8 @@ self.jump(self.shortJumpPosition(current_bytecode)) def shortConditionalJump(self, interp, current_bytecode): - self.jumpConditional( - interp.space.w_false, self.shortJumpPosition(current_bytecode)) + # The conditional jump is "jump on false" + self.jumpConditional(interp, False, self.shortJumpPosition(current_bytecode)) def longUnconditionalJump(self, interp, current_bytecode): self.jump((((current_bytecode & 7) - 4) << 8) + self.getbytecode()) @@ -664,11 +662,10 @@ return ((current_bytecode & 3) << 8) + self.getbytecode() def longJumpIfTrue(self, interp, current_bytecode): - self.jumpConditional(interp.space.w_true, self.longJumpPosition(current_bytecode)) + self.jumpConditional(interp, True, self.longJumpPosition(current_bytecode)) def longJumpIfFalse(self, interp, current_bytecode): - self.jumpConditional(interp.space.w_false, self.longJumpPosition(current_bytecode)) - + self.jumpConditional(interp, False, self.longJumpPosition(current_bytecode)) bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -333,12 +333,23 @@ assert isinstance(w_dnu, model.W_BytesObject) assert w_dnu.as_string() == "doesNotUnderstand:" +def test_mustBeBoolean(): + w_mbb = interp.space.objtable["w_mustBeBoolean"] + assert isinstance(w_mbb, model.W_BytesObject) + assert w_mbb.as_string() == "mustBeBoolean" + def test_run_doesNotUnderstand(): space, interp, _, _ = read_image('running-something-mini.image') w_result = interp.perform(interp.space.wrap_int(0), 
"runningADNU") assert isinstance(w_result, model.W_BytesObject) assert w_result.as_string() == "foobarThis:doesNotExist:('pypy' 'heya' )" +def test_run_mustBeBoolean(): + space, interp, _, _ = read_image('running-something-mini.image') + w_result = interp.perform(interp.space.wrap_int(0), "runningMustBeBoolean") + assert isinstance(w_result, model.W_BytesObject) + assert w_result.as_string() == "mustBeBoolean has been called" + def test_Message(): w_message_cls = interp.space.w_Message assert w_message_cls is interp.space.classtable["w_Message"] @@ -349,13 +360,16 @@ assert isinstance(w_message, model.W_PointersObject) def test_step_run_something(): + # This test depends on the following code being executed in a workspace (the entire line): + # a := Smalltalk snapshotPrimitive. 1+2. + # This will save the image in a state that will satisfy the following test. + space, interp, _, _ = read_image('running-something-mini.image') ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) w_ctx = ap.suspended_context() s_ctx = w_ctx.as_context_get_shadow(space) ap.store_suspended_context(space.w_nil) - - interp = TestInterpreter(space) + assert isinstance(s_ctx, shadow.MethodContextShadow) assert s_ctx.top().is_same_object(space.w_true) interp.step(s_ctx) From noreply at buildbot.pypy.org Wed May 7 22:34:35 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 May 2014 22:34:35 +0200 (CEST) Subject: [pypy-commit] pypy separate-jit-compilation: initial checkin - a branch to try to compile the JIT separately from the Message-ID: <20140507203435.D84001C066C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: separate-jit-compilation Changeset: r71395:1589099e8e20 Date: 2014-05-06 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/1589099e8e20/ Log: initial checkin - a branch to try to compile the JIT separately from the interpreter diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2000,8 +2000,9 @@ self.staticdata.log(sys.exc_info()[0].__name__) raise - @specialize.arg(1) - def compile_and_run_once(self, jitdriver_sd, *args): + def compile_and_run_once(self, jitdriver_sd, + green_args_i, green_args_r, green_args_f, + args_i, args_r, args_f): # NB. we pass explicity 'jitdriver_sd' around here, even though it # is also available as 'self.jitdriver_sd', because we need to # specialize this function and a few other ones for the '*args'. @@ -2012,7 +2013,9 @@ self.staticdata.try_to_free_some_loops() self.create_empty_history() try: - original_boxes = self.initialize_original_boxes(jitdriver_sd, *args) + original_boxes = self.initialize_original_boxes( + jitdriver_sd, green_args_i, green_args_r, green_args_f, + args_i, args_r, args_f) return self._compile_and_run_once(original_boxes) finally: self.staticdata.profiler.end_tracing() @@ -2370,22 +2373,37 @@ if target_token is not token: compile.giveup() - @specialize.arg(1) - def initialize_original_boxes(self, jitdriver_sd, *args): + def initialize_original_boxes(self, jitdriver_sd, + greens_i, greens_r, greens_f, + args_i, args_r, args_f): original_boxes = [] self._fill_original_boxes(jitdriver_sd, original_boxes, - jitdriver_sd.num_green_args, *args) + greens_i, greens_r, greens_f, args_i, args_r, + args_f) return original_boxes - @specialize.arg(1) - def _fill_original_boxes(self, jitdriver_sd, original_boxes, - num_green_args, *args): - if args: - from rpython.jit.metainterp.warmstate import wrap - box = wrap(self.cpu, args[0], num_green_args > 0) + def _fill_original_boxes(self, jitdriver_sd, original_boxes, greens_i, + greens_r, greens_f, args_i, args_r, args_f): + from rpython.jit.metainterp.warmstate import wrap + + for ival in greens_i: + box = wrap(self.cpu, ival, True) original_boxes.append(box) - self._fill_original_boxes(jitdriver_sd, original_boxes, - num_green_args-1, *args[1:]) + for rval in greens_r: + 
box = wrap(self.cpu, rval, True) + original_boxes.append(box) + for fval in greens_f: + box = wrap(self.cpu, fval, True) + original_boxes.append(box) + for ival in args_i: + box = wrap(self.cpu, ival, False) + original_boxes.append(box) + for rval in args_r: + box = wrap(self.cpu, rval, False) + original_boxes.append(box) + for fval in args_f: + box = wrap(self.cpu, fval, False) + original_boxes.append(box) def initialize_state_from_start(self, original_boxes): # ----- make a new frame ----- diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -569,6 +569,8 @@ ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v] + jd.green_args_types = [history.getkind(v.concretetype) + for v in greens_v] jd.num_green_args = len(jd._green_args_spec) jd.num_red_args = len(jd.red_args_types) RESTYPE = graph.getreturnvar().concretetype diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -269,6 +269,19 @@ vinfo = jitdriver_sd.virtualizable_info index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args + num_green_ints = 0 + num_green_refs = 0 + for kind in jitdriver_sd.green_args_types: + if kind == 'int': + num_green_ints += 1 + elif kind == 'ref': + num_green_refs += 1 + num_green_floats = num_green_args - num_green_ints - num_green_refs + range_green_ints = unrolling_iterable(enumerate(range(num_green_ints))) + range_green_refs = unrolling_iterable(enumerate( + range(num_green_ints, num_green_ints + num_green_refs))) + range_green_floats = unrolling_iterable(enumerate( + range(num_green_ints + num_green_refs, num_green_args))) JitCell = self.make_jitcell_subclass() 
self.make_jitdriver_callbacks() confirm_enter_jit = self.confirm_enter_jit @@ -276,12 +289,18 @@ range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) # get a new specialized copy of the method ARGS = [] + num_red_ints = 0 + num_red_refs = 0 + num_red_floats = 0 for kind in jitdriver_sd.red_args_types: if kind == 'int': + num_red_ints += 1 ARGS.append(lltype.Signed) elif kind == 'ref': + num_red_refs += 1 ARGS.append(llmemory.GCREF) elif kind == 'float': + num_red_floats += 1 ARGS.append(longlong.FLOATSTORAGE) else: assert 0, kind @@ -289,6 +308,15 @@ cpu = self.cpu jitcounter = self.warmrunnerdesc.jitcounter + range_red_ints = unrolling_iterable(enumerate( + range(num_green_args, num_green_args + num_red_ints))) + range_red_refs = unrolling_iterable(enumerate( + range(num_green_args + num_red_ints, + num_green_args + num_red_ints + num_red_refs))) + range_red_floats = unrolling_iterable(enumerate( + range(num_green_args + num_red_ints + num_red_refs, + jitdriver_sd.num_red_args))) + def execute_assembler(loop_token, *args): # Call the backend to run the 'looptoken' with the given # input args. 
@@ -325,7 +353,26 @@ jitcounter.install_new_cell(hash, cell) cell.flags |= JC_TRACING try: - metainterp.compile_and_run_once(jitdriver_sd, *args) + green_ints = [0] * num_green_ints + green_refs = [lltype.nullptr(llmemory.GCREF.TO)] * num_green_refs + green_floats = [longlong.getfloatstorage(0.0)] * num_green_floats + red_ints = [0] * num_red_ints + red_refs = [lltype.nullptr(llmemory.GCREF.TO)] * num_red_refs + red_floats = [longlong.getfloatstorage(0.0)] * num_red_floats + for i, num in range_green_ints: + green_ints[i] = args[num] + for i, num in range_green_refs: + green_refs[i] = args[num] + for i, num in range_green_floats: + green_floats[i] = args[num] + for i, num in range_red_ints: + red_ints[i] = args[num] + for i, num in range_red_refs: + red_refs[i] = args[num] + for i, num in range_red_floats: + red_floats[i] = args[num] + metainterp.compile_and_run_once(jitdriver_sd, green_ints, + green_refs, green_floats, red_ints, red_refs, red_floats) finally: cell.flags &= ~JC_TRACING diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -512,9 +512,9 @@ # if reds are automatic, they won't be passed to jit_merge_point, so # _check_arguments will receive only the green ones (i.e., the ones # which are listed explicitly). So, it is fine to just ignore reds - self._somelivevars = set([name for name in + self._somelivevars = set([_name for _name in self.greens + (self.reds or []) - if '.' not in name]) + if '.' 
not in _name]) self._heuristic_order = {} # check if 'reds' and 'greens' are ordered self._make_extregistryentries() assert get_jitcell_at is None, "get_jitcell_at no longer used" From noreply at buildbot.pypy.org Wed May 7 22:34:37 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 May 2014 22:34:37 +0200 (CEST) Subject: [pypy-commit] pypy separate-jit-compilation: Make execute_token accept the same set of args for all the possibilities Message-ID: <20140507203437.0EFF01C066C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: separate-jit-compilation Changeset: r71396:bc8d0c269519 Date: 2014-05-06 19:16 +0200 http://bitbucket.org/pypy/pypy/changeset/bc8d0c269519/ Log: Make execute_token accept the same set of args for all the possibilities diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -290,12 +290,12 @@ compiled_loop_token._llgraph_loop = None model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) - def make_execute_token(self, *argtypes): + def make_execute_token(self): return self._execute_token - def _execute_token(self, loop_token, *args): + def _execute_token(self, loop_token, args_i, args_r, args_f): lltrace = loop_token.compiled_loop_token._llgraph_loop - frame = LLFrame(self, lltrace.inputargs, args) + frame = LLFrame(self, lltrace.inputargs, args_i + args_r + args_f) try: frame.execute(lltrace) assert False diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -214,16 +214,12 @@ frame.jf_descr = frame.jf_force_descr return lltype.cast_opaque_ptr(llmemory.GCREF, frame) - def make_execute_token(self, *ARGS): + def make_execute_token(self): FUNCPTR = lltype.Ptr(lltype.FuncType([llmemory.GCREF], llmemory.GCREF)) - lst = [(i, history.getkind(ARG)[0]) for 
i, ARG in enumerate(ARGS)] - kinds = unrolling_iterable(lst) - - def execute_token(executable_token, *args): + def execute_token(executable_token, args_i, args_r, args_f): clt = executable_token.compiled_loop_token - assert len(args) == clt._debug_nbargs # addr = executable_token._ll_function_addr func = rffi.cast(FUNCPTR, addr) @@ -237,16 +233,16 @@ prev_interpreter = LLInterpreter.current_interpreter LLInterpreter.current_interpreter = self.debug_ll_interpreter try: - for i, kind in kinds: - arg = args[i] - num = locs[i] - if kind == history.INT: - self.set_int_value(ll_frame, num, arg) - elif kind == history.FLOAT: - self.set_float_value(ll_frame, num, arg) - else: - assert kind == history.REF - self.set_ref_value(ll_frame, num, arg) + num = 0 + for arg_i in args_i: + self.set_int_value(ll_frame, locs[num], arg_i) + num += 1 + for arg_r in args_r: + self.set_ref_value(ll_frame, locs[num], arg_r) + num += 1 + for arg_f in args_f: + self.set_float_value(ll_frame, locs[num], arg_f) + num += 1 llop.gc_writebarrier(lltype.Void, ll_frame) ll_frame = func(ll_frame) finally: diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -73,17 +73,16 @@ """Print a disassembled version of looptoken to stdout""" raise NotImplementedError - def execute_token(self, looptoken, *args): + def execute_token(self, looptoken, args_i, args_r, args_f): """NOT_RPYTHON (for tests only) Execute the generated code referenced by the looptoken. When done, this returns a 'dead JIT frame' object that can be inspected with the get_latest_xxx() methods. 
""" - argtypes = [lltype.typeOf(x) for x in args] - execute = self.make_execute_token(*argtypes) - return execute(looptoken, *args) + execute = self.make_execute_token() + return execute(looptoken, args_i, args_r, args_f) - def make_execute_token(self, *argtypes): + def make_execute_token(self): """Must make and return an execute_token() function that will be called with the given argtypes. """ diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2194,15 +2194,18 @@ def _nontranslated_run_directly(self, live_arg_boxes, loop_token): "NOT_RPYTHON" - args = [] num_green_args = self.jitdriver_sd.num_green_args num_red_args = self.jitdriver_sd.num_red_args + args_i = [] + args_r = [] + args_f = [] for box in live_arg_boxes[num_green_args:num_green_args+num_red_args]: - if box.type == history.INT: args.append(box.getint()) - elif box.type == history.REF: args.append(box.getref_base()) - elif box.type == history.FLOAT: args.append(box.getfloatstorage()) + if box.type == history.INT: args_i.append(box.getint()) + elif box.type == history.REF: args_r.append(box.getref_base()) + elif box.type == history.FLOAT: args_f.append(box.getfloatstorage()) else: assert 0 - self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) + self.jitdriver_sd.warmstate.execute_assembler(loop_token, args_i, + args_r, args_f) def prepare_resume_from_failure(self, opnum, dont_change_position, deadframe): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -267,7 +267,6 @@ metainterp_sd = warmrunnerdesc.metainterp_sd jitdriver_sd = self.jitdriver_sd vinfo = jitdriver_sd.virtualizable_info - index_of_virtualizable = jitdriver_sd.index_of_virtualizable num_green_args = jitdriver_sd.num_green_args num_green_ints = 0 num_green_refs = 0 @@ -288,23 +287,21 @@ 
range_red_args = unrolling_iterable( range(num_green_args, num_green_args + jitdriver_sd.num_red_args)) # get a new specialized copy of the method - ARGS = [] num_red_ints = 0 num_red_refs = 0 num_red_floats = 0 for kind in jitdriver_sd.red_args_types: if kind == 'int': num_red_ints += 1 - ARGS.append(lltype.Signed) elif kind == 'ref': num_red_refs += 1 - ARGS.append(llmemory.GCREF) elif kind == 'float': num_red_floats += 1 - ARGS.append(longlong.FLOATSTORAGE) else: assert 0, kind - func_execute_token = self.cpu.make_execute_token(*ARGS) + index_of_virtualizable = (jitdriver_sd.index_of_virtualizable - + num_red_ints) + func_execute_token = self.cpu.make_execute_token() cpu = self.cpu jitcounter = self.warmrunnerdesc.jitcounter @@ -317,7 +314,7 @@ range(num_green_args + num_red_ints + num_red_refs, jitdriver_sd.num_red_args))) - def execute_assembler(loop_token, *args): + def execute_assembler(loop_token, args_i, args_r, args_f): # Call the backend to run the 'looptoken' with the given # input args. 
@@ -325,10 +322,10 @@ # state, to make sure we enter with vable_token being NONE # if vinfo is not None: - virtualizable = args[index_of_virtualizable] + virtualizable = args_r[index_of_virtualizable] vinfo.clear_vable_token(virtualizable) - deadframe = func_execute_token(loop_token, *args) + deadframe = func_execute_token(loop_token, args_i, args_r, args_f) # # Record in the memmgr that we just ran this loop, # so that it will keep it alive for a longer time @@ -420,11 +417,17 @@ return # extract and unspecialize the red arguments to pass to # the assembler - execute_args = () - for i in range_red_args: - execute_args += (unspecialize_value(args[i]), ) + args_i = [0] * num_red_ints + args_r = [lltype.nullptr(llmemory.GCREF.TO)] * num_red_refs + args_f = [longlong.getfloatstorage(0.0)] * num_red_floats + for i, num in range_red_ints: + args_i[i] = unspecialize_value(args[num]) + for i, num in range_red_refs: + args_r[i] = unspecialize_value(args[num]) + for i, num in range_red_floats: + args_f[i] = unspecialize_value(args[num]) # run it! 
this executes until interrupted by an exception - execute_assembler(procedure_token, *execute_args) + execute_assembler(procedure_token, args_i, args_r, args_f) assert 0, "should not reach this point" maybe_compile_and_run._dont_inline_ = True From noreply at buildbot.pypy.org Thu May 8 14:14:21 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 May 2014 14:14:21 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: add app-level hint_commit_soon for experimentation Message-ID: <20140508121421.9C7271C066C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71397:d86fc4a4ae43 Date: 2014-05-08 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/d86fc4a4ae43/ Log: add app-level hint_commit_soon for experimentation diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -5,7 +5,7 @@ try: from __pypy__ import thread as _thread - from __pypy__.thread import atomic, getsegmentlimit + from __pypy__.thread import atomic, getsegmentlimit, hint_commit_soon except ImportError: # Not a STM-enabled PyPy. We can still provide a version of 'atomic' # that is good enough for our purposes. 
With this limited version, @@ -19,6 +19,9 @@ def print_abort_info(mintime=0.0): pass + def hint_commit_soon(): + pass + else: import re, sys, linecache diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -41,6 +41,7 @@ 'longest_abort_info': 'interp_atomic.longest_abort_info', 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', + 'hint_commit_soon': 'interp_atomic.hint_commit_soon', 'error': 'space.fromcache(pypy.module.thread.error.Cache).w_error', } def activate(self, space): diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -76,3 +76,9 @@ if space.config.translation.stm: from rpython.rlib.rstm import reset_longest_abort_info reset_longest_abort_info() + + +def hint_commit_soon(space): + if space.config.translation.stm: + from rpython.rlib.rstm import hint_commit_soon + hint_commit_soon() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -50,6 +50,12 @@ if_there_is_no_other) @dont_look_inside +def hint_commit_soon(): + """As the name says, just a hint. 
Maybe calling it + several times in a row is more persuasive""" + llop.stm_hint_commit_soon(lltype.Void) + + at dont_look_inside def become_inevitable(): llop.stm_become_inevitable(lltype.Void) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,6 +438,8 @@ 'stm_should_break_transaction': LLOp(sideeffects=False), 'stm_set_transaction_length': LLOp(), + 'stm_hint_commit_soon': LLOp(), + 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, # see threadlocalref.py diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -43,6 +43,9 @@ # self.obj.prebuilt_hash +def stm_hint_commit_soon(funcgen, op): + return 'stmcb_commit_soon();' + def stm_register_thread_local(funcgen, op): return 'pypy_stm_register_thread_local();' From noreply at buildbot.pypy.org Thu May 8 15:01:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 May 2014 15:01:44 +0200 (CEST) Subject: [pypy-commit] stmgc default: Small fix. Without this, the first 255 transaction's read markers are Message-ID: <20140508130144.B6CB71D2C51@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1202:dcea4c839318 Date: 2014-05-08 15:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/dcea4c839318/ Log: Small fix. Without this, the first 255 transaction's read markers are written in the shared mmap, but then we mount a private zero mmap for every 255 transactions. It means the initial 255 transaction's read markers stay around in memory (I think), even if they are never accessed any more. 
diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -86,15 +86,16 @@ pr->callbacks_on_abort = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; + pr->pub.transaction_read_version = 0xff; } /* The pages are shared lazily, as remap_file_pages() takes a relatively long time for each page. - The read markers are initially zero, which is correct: - STM_SEGMENT->transaction_read_version never contains zero, - so a null read marker means "not read" whatever the - current transaction_read_version is. + The read markers are initially zero, but we set anyway + transaction_read_version to 0xff in order to force the first + transaction to "clear" the read markers by mapping a different, + private range of addresses. */ setup_sync(); From noreply at buildbot.pypy.org Thu May 8 15:01:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 May 2014 15:01:45 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a #define USE_REMAP_FILE_PAGES which can be turned off. Message-ID: <20140508130145.D66F71D2C51@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1203:31f9797a356c Date: 2014-05-08 15:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/31f9797a356c/ Log: Add a #define USE_REMAP_FILE_PAGES which can be turned off. 
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -209,6 +209,7 @@ }; static char *stm_object_pages; +static int stm_object_pages_fd; static stm_thread_local_t *stm_all_thread_locals = NULL; static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -8,14 +8,10 @@ static char *fork_big_copy = NULL; +static int fork_big_copy_fd; static stm_thread_local_t *fork_this_tl; static bool fork_was_in_transaction; -static char *setup_mmap(char *reason); /* forward, in setup.c */ -static void setup_protection_settings(void); /* forward, in setup.c */ -static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ - - static bool page_is_null(char *p) { long *q = (long *)p; @@ -74,7 +70,8 @@ /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages */ - char *big_copy = setup_mmap("stmgc's fork support"); + int big_copy_fd; + char *big_copy = setup_mmap("stmgc's fork support", &big_copy_fd); /* Copy each of the segment infos into the new mmap, nurseries, and associated read markers @@ -139,6 +136,7 @@ assert(fork_big_copy == NULL); fork_big_copy = big_copy; + fork_big_copy_fd = big_copy_fd; fork_this_tl = this_tl; fork_was_in_transaction = was_in_transaction; @@ -163,6 +161,7 @@ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); fork_big_copy = NULL; + close_fd_mmap(fork_big_copy_fd); bool was_in_transaction = fork_was_in_transaction; s_mutex_unlock(); @@ -214,6 +213,8 @@ if (res != stm_object_pages) stm_fatalerror("after fork: mremap failed: %m"); fork_big_copy = NULL; + close_fd_mmap(stm_object_pages_fd); + stm_object_pages_fd = fork_big_copy_fd; /* Unregister all other stm_thread_local_t, mostly as a way to free the memory used by the shadowstacks diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -81,9 +81,18 
@@ can only be remapped to page N in another segment */ assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); +#ifdef USE_REMAP_FILE_PAGES int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) stm_fatalerror("remap_file_pages: %m"); +#else + char *res = mmap(addr, size, + PROT_READ | PROT_WRITE, + (MAP_PAGES_FLAGS & ~MAP_ANONYMOUS) | MAP_FIXED, + stm_object_pages_fd, pgoff * 4096UL); + if (UNLIKELY(res != addr)) + stm_fatalerror("mmap (remapping page): %m"); +#endif } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) @@ -169,6 +178,7 @@ static void pages_setup_readmarkers_for_nursery(void) { +#ifdef USE_REMAP_FILE_PAGES /* The nursery page's read markers are never read, but must still be writeable. We'd like to map the pages to a general "trash page"; missing one, we remap all the pages over to the same one. @@ -187,4 +197,5 @@ /* errors here ignored */ } } +#endif } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -19,6 +19,8 @@ #define PAGE_FLAG_START END_NURSERY_PAGE #define PAGE_FLAG_END NB_PAGES +#define USE_REMAP_FILE_PAGES + struct page_shared_s { #if NB_SEGMENTS <= 8 uint8_t by_segment; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -3,7 +3,8 @@ #endif -static char *setup_mmap(char *reason) +#ifdef USE_REMAP_FILE_PAGES +static char *setup_mmap(char *reason, int *ignored) { char *result = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, @@ -13,6 +14,45 @@ return result; } +static void close_fd_mmap(int ignored) +{ +} +#else +#include /* For O_* constants */ +static char *setup_mmap(char *reason, int *map_fd) +{ + char name[128]; + sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", + (long)getpid(), get_stm_time()); + + /* Create the big shared memory object, and immediately unlink it. + There is a small window where if this process is killed the + object is left around. It doesn't seem possible to do anything + about it... 
+ */ + int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); + shm_unlink(name); + + if (fd == -1) { + stm_fatalerror("%s failed (stm_open): %m", reason); + } + if (ftruncate(fd, TOTAL_MEMORY) != 0) { + stm_fatalerror("%s failed (ftruncate): %m", reason); + } + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS & ~MAP_ANONYMOUS, fd, 0); + if (result == MAP_FAILED) { + stm_fatalerror("%s failed (mmap): %m", reason); + } + *map_fd = fd; + return result; +} +static void close_fd_mmap(int map_fd) +{ + close(map_fd); +} +#endif static void setup_protection_settings(void) { @@ -56,7 +96,8 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()", + &stm_object_pages_fd); setup_protection_settings(); long i; @@ -127,6 +168,7 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; + close_fd_mmap(stm_object_pages_fd); teardown_core(); teardown_sync(); diff --git a/c7/stm/setup.h b/c7/stm/setup.h new file mode 100644 --- /dev/null +++ b/c7/stm/setup.h @@ -0,0 +1,5 @@ + +static char *setup_mmap(char *reason, int *map_fd); +static void close_fd_mmap(int map_fd); +static void setup_protection_settings(void); +static pthread_t *_get_cpth(stm_thread_local_t *); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -7,6 +7,7 @@ #include "stm/pages.h" #include "stm/gcpage.h" #include "stm/sync.h" +#include "stm/setup.h" #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" From noreply at buildbot.pypy.org Thu May 8 15:15:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 May 2014 15:15:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix duhton Message-ID: <20140508131531.EA11D1C1041@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1204:2504945a518f Date: 2014-05-08 15:15 +0200 
http://bitbucket.org/pypy/stmgc/changeset/2504945a518f/ Log: Fix duhton diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -35,6 +35,8 @@ trace((struct DuObject_s *)obj, visit); } +void stmcb_commit_soon(void) { } + DuObject *DuObject_New(DuType *tp) { From noreply at buildbot.pypy.org Thu May 8 17:32:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 8 May 2014 17:32:15 +0200 (CEST) Subject: [pypy-commit] stmgc default: try to reduce the commit-time overhead a bit (adds to all other threads as sync-pause time) Message-ID: <20140508153215.C6E061C327E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1205:b0dd12f874f8 Date: 2014-05-08 17:32 +0200 http://bitbucket.org/pypy/stmgc/changeset/b0dd12f874f8/ Log: try to reduce the commit-time overhead a bit (adds to all other threads as sync-pause time) diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -98,13 +98,14 @@ /************************************************************/ -static void contention_management(uint8_t other_segment_num, +static bool contention_management(uint8_t other_segment_num, enum contention_kind_e kind, object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); + bool others_may_have_run = false; if (must_abort()) abort_with_mutex(); @@ -152,6 +153,7 @@ if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { + others_may_have_run = true; /* Sleep. 
- Not for write-write contentions, because we're not at a @@ -225,6 +227,7 @@ if (must_abort()) abort_with_mutex(); + others_may_have_run = true; dprintf(("contention: wait C_ABORTED...\n")); cond_wait(C_ABORTED); dprintf(("contention: done\n")); @@ -278,6 +281,7 @@ stmcb_commit_soon(); } } + return others_may_have_run; } static void write_write_contention_management(uintptr_t lock_idx, @@ -301,10 +305,10 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num, +static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,7 +1,7 @@ static void write_write_contention_management(uintptr_t lock_idx, object_t *obj); -static void write_read_contention_management(uint8_t other_segment_num, +static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -286,13 +286,15 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i, item); - - /* If we reach this point, we didn't abort, but maybe we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; + if (write_read_contention_management(i, item)) { + /* If we reach this point, we didn't abort, but we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). 
*/ + return true; + } + /* we aborted the other transaction without waiting, so + we can just continue */ } })); } @@ -501,6 +503,9 @@ /* the call to minor_collection() above leaves us with STM_TIME_BOOKKEEPING */ + /* synchronize overflow objects living in privatized pages */ + push_overflow_objects_from_privatized_pages(); + s_mutex_lock(); restart: @@ -520,11 +525,11 @@ STM_SEGMENT->jmpbuf_ptr = NULL; /* if a major collection is required, do it here */ - if (is_major_collection_requested()) + if (is_major_collection_requested()) { + int oldstate = change_timing_state(STM_TIME_MAJOR_GC); major_collection_now_at_safe_point(); - - /* synchronize overflow objects living in privatized pages */ - push_overflow_objects_from_privatized_pages(); + change_timing_state(oldstate); + } /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); From noreply at buildbot.pypy.org Thu May 8 18:24:06 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:24:06 +0200 (CEST) Subject: [pypy-commit] pypy tkinter_osx_packaging: better error message: this needs X11 headers too Message-ID: <20140508162406.2B5BF1C325A@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: tkinter_osx_packaging Changeset: r71398:5d8ce46c0acb Date: 2014-05-08 18:45 +0400 http://bitbucket.org/pypy/pypy/changeset/5d8ce46c0acb/ Log: better error message: this needs X11 headers too diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -12,7 +12,8 @@ try: from .tklib import tklib, tkffi except cffi.VerificationError: - raise ImportError("Tk headers and development libraries are required") + raise ImportError( + "Tk and X11 headers and development libraries are required") from .app import TkApp From noreply at buildbot.pypy.org Thu May 8 18:24:07 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:24:07 +0200 (CEST) 
Subject: [pypy-commit] pypy tkinter_osx_packaging: OS/X specific header path Message-ID: <20140508162407.400381C325A@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: tkinter_osx_packaging Changeset: r71399:5783d7359f2b Date: 2014-05-08 18:47 +0400 http://bitbucket.org/pypy/pypy/changeset/5783d7359f2b/ Log: OS/X specific header path diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] From noreply at buildbot.pypy.org Thu May 8 18:24:08 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:24:08 +0200 (CEST) Subject: [pypy-commit] pypy tkinter_osx_packaging: backout 5d8ce46c0acb - maybe it does not really need X11 headers on all platforms Message-ID: <20140508162408.692B81C325A@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: tkinter_osx_packaging Changeset: r71400:b6e026fbbc37 Date: 2014-05-08 18:58 +0400 http://bitbucket.org/pypy/pypy/changeset/b6e026fbbc37/ Log: backout 5d8ce46c0acb - maybe it does not really need X11 headers on all platforms diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -12,8 +12,7 @@ try: from .tklib import tklib, tkffi except cffi.VerificationError: - raise ImportError( - "Tk and X11 headers and development libraries are required") + raise ImportError("Tk headers and development libraries are required") from .app import TkApp From noreply at buildbot.pypy.org Thu May 8 18:24:09 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:24:09 +0200 (CEST) Subject: [pypy-commit] pypy 
tkinter_osx_packaging: (alex_gaynor) use /Current/, it symlinks to the proper place Message-ID: <20140508162409.8226E1C325A@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: tkinter_osx_packaging Changeset: r71401:d873a874ad92 Date: 2014-05-08 19:06 +0400 http://bitbucket.org/pypy/pypy/changeset/d873a874ad92/ Log: (alex_gaynor) use /Current/, it symlinks to the proper place diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -122,7 +122,7 @@ linklibs = ['tcl85', 'tk85'] libdirs = [] elif sys.platform == 'darwin': - incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers/'] + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] linklibs = ['tcl', 'tk'] libdirs = [] else: From noreply at buildbot.pypy.org Thu May 8 18:24:10 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:24:10 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Merged in kostialopuhin/pypy/tkinter_osx_packaging (pull request #238) Message-ID: <20140508162410.9CE9F1C325A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71402:9c855807e9e0 Date: 2014-05-08 19:23 +0300 http://bitbucket.org/pypy/pypy/changeset/9c855807e9e0/ Log: Merged in kostialopuhin/pypy/tkinter_osx_packaging (pull request #238) Fix tkinter packaging on OS/X diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] From noreply at buildbot.pypy.org Thu May 8 18:26:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 18:26:33 +0200 (CEST) Subject: 
[pypy-commit] pypy default: fix ioctl with negative code param on osx Message-ID: <20140508162633.2F73A1D2957@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71403:d03659ab55ea Date: 2014-05-08 09:25 -0700 http://bitbucket.org/pypy/pypy/changeset/d03659ab55ea/ Log: fix ioctl with negative code param on osx diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -267,6 +269,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + else: + set_winsz_opcode_pos = 
termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform From noreply at buildbot.pypy.org Thu May 8 18:32:19 2014 From: noreply at buildbot.pypy.org (mswart) Date: Thu, 8 May 2014 18:32:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: stm: compile time stm checks assume everytime no stm, replace them Message-ID: <20140508163219.249351D2991@cobra.cs.uni-duesseldorf.de> Author: Malte Swart Branch: stmgc-c7 Changeset: r811:fe1ee893cfb1 Date: 2014-05-08 18:22 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/fe1ee893cfb1/ Log: stm: compile time stm checks assume everytime no stm, replace them rgc.is_stm_enabled first return None and is replace within a trancelation by the real one. This means that it is not possible to use them for compile time checks (at module import time) as it would always assume that stm is disabled. diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -521,16 +521,11 @@ w_frame.store(interp.space, constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame - -def stm_enabled(): - """NOT RPYTHON""" +def get_instances_array(space, s_frame, w_class): from rpython.rlib import rgc - return hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled() -if stm_enabled(): - def get_instances_array(space, s_frame, w_class): + if rgc.stm_is_enabled(): return [] -else: - def get_instances_array(space, s_frame, w_class): + else: # This primitive returns some instance of the class on the stack. 
# Not sure quite how to do this; maintain a weak list of all # existing instances or something? @@ -931,12 +926,13 @@ return w_rcvr -if not stm_enabled(): - # XXX: We don't have a global symbol cache. Instead, we get all - # method dictionary shadows (those exists for all methodDicts that - # have been modified) and flush them - @expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) - def func(interp, s_frame, w_rcvr): +# XXX: We don't have a global symbol cache. Instead, we get all +# method dictionary shadows (those exists for all methodDicts that +# have been modified) and flush them + at expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rgc + if not rgc.stm_is_enabled(): dicts_s = [] from rpython.rlib import rgc @@ -961,6 +957,8 @@ if s_dict.invalid: s_dict.sync_cache() return w_rcvr + else: + raise PrimitiveFailedError("SYMBOL_FLUSH_CACHE not implemented with STM") # ___________________________________________________________________________ # Miscellaneous Primitives (120-127) From noreply at buildbot.pypy.org Thu May 8 18:32:20 2014 From: noreply at buildbot.pypy.org (mswart) Date: Thu, 8 May 2014 18:32:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: stm: basis stm activation (primary transaction breaks) Message-ID: <20140508163220.376971D2991@cobra.cs.uni-duesseldorf.de> Author: Malte Swart Branch: stmgc-c7 Changeset: r812:77ddbd714cfb Date: 2014-05-08 18:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/77ddbd714cfb/ Log: stm: basis stm activation (primary transaction breaks) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -4,7 +4,7 @@ from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit +from rpython.rlib import jit, rstm from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): 
@@ -33,6 +33,7 @@ greens=['pc', 'self', 'method'], reds=['s_context'], virtualizables=['s_context'], + stm_do_transaction_breaks=True, get_printable_location=get_printable_location ) @@ -111,6 +112,8 @@ if jit.we_are_jitted(): self.quick_check_for_interrupt(s_context, dec=self._get_adapted_tick_counter()) + if rstm.jit_stm_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() self.jit_driver.can_enter_jit( pc=pc, self=self, method=method, s_context=s_context) @@ -118,6 +121,8 @@ self.jit_driver.jit_merge_point( pc=pc, self=self, method=method, s_context=s_context) + if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() try: self.step(s_context) except Return, nlr: From noreply at buildbot.pypy.org Thu May 8 18:32:21 2014 From: noreply at buildbot.pypy.org (mswart) Date: Thu, 8 May 2014 18:32:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: model: disable @jit.elidable for as_repr_string Message-ID: <20140508163221.509DD1D2991@cobra.cs.uni-duesseldorf.de> Author: Malte Swart Branch: stmgc-c7 Changeset: r813:014f5e42251f Date: 2014-05-08 18:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/014f5e42251f/ Log: model: disable @jit.elidable for as_repr_string Marking as_repr_string on models as @jit.elidable recreates rpython errors as it can not applied every time. As it is not important remove it for now. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -203,7 +203,6 @@ return r_uint(val) - @jit.elidable def as_repr_string(self): return "W_SmallInteger(%d)" % self.value @@ -457,7 +456,6 @@ name = self.s_class.name return "a %s" % (name or '?',) - @jit.elidable def as_repr_string(self): return self.as_embellished_string("W_O /w Class", "") @@ -632,7 +630,6 @@ w_other.changed() return True - @jit.elidable def as_repr_string(self): return W_AbstractObjectWithClassReference.as_embellished_string(self, className='W_PointersObject', From noreply at buildbot.pypy.org Thu May 8 18:33:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 18:33:52 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: fix ioctl with negative code param on osx Message-ID: <20140508163352.162121D2991@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: release-2.3.x Changeset: r71404:da8ff770beb2 Date: 2014-05-08 09:25 -0700 http://bitbucket.org/pypy/pypy/changeset/da8ff770beb2/ Log: fix ioctl with negative code param on osx diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py 
@@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -267,6 +269,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + else: + set_winsz_opcode_pos = termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform From noreply at buildbot.pypy.org Thu May 8 18:40:10 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:10 +0200 (CEST) Subject: [pypy-commit] pypy tkinter_osx_packaging: update whatsnew Message-ID: <20140508164010.AB0911C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: tkinter_osx_packaging Changeset: r71405:6bb854fd97a6 Date: 2014-05-08 19:27 +0300 http://bitbucket.org/pypy/pypy/changeset/6bb854fd97a6/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. 
branch: tkinter_osx_packaging +OS/X specific header path From noreply at buildbot.pypy.org Thu May 8 18:40:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:11 +0200 (CEST) Subject: [pypy-commit] pypy tkinter_osx_packaging: close branch for merging Message-ID: <20140508164011.C6C191C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: tkinter_osx_packaging Changeset: r71406:53535dbab4bd Date: 2014-05-08 19:28 +0300 http://bitbucket.org/pypy/pypy/changeset/53535dbab4bd/ Log: close branch for merging From noreply at buildbot.pypy.org Thu May 8 18:40:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:12 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge tkinter_osx_packaging into release Message-ID: <20140508164012.E28ED1C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71407:13e694aa9918 Date: 2014-05-08 19:29 +0300 http://bitbucket.org/pypy/pypy/changeset/13e694aa9918/ Log: merge tkinter_osx_packaging into release diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. 
branch: tkinter_osx_packaging +OS/X specific header path From noreply at buildbot.pypy.org Thu May 8 18:40:14 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:40:14 +0200 (CEST) Subject: [pypy-commit] pypy default: OS/X specific header path Message-ID: <20140508164014.0BAF41C01DE@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: Changeset: r71408:0308ce1b8a22 Date: 2014-05-08 18:47 +0400 http://bitbucket.org/pypy/pypy/changeset/0308ce1b8a22/ Log: OS/X specific header path diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] From noreply at buildbot.pypy.org Thu May 8 18:40:15 2014 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Thu, 8 May 2014 18:40:15 +0200 (CEST) Subject: [pypy-commit] pypy default: (alex_gaynor) use /Current/, it symlinks to the proper place Message-ID: <20140508164015.2B5511C01DE@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: Changeset: r71409:b2cc67adbaad Date: 2014-05-08 19:06 +0400 http://bitbucket.org/pypy/pypy/changeset/b2cc67adbaad/ Log: (alex_gaynor) use /Current/, it symlinks to the proper place diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -122,7 +122,7 @@ linklibs = ['tcl85', 'tk85'] libdirs = [] elif sys.platform == 'darwin': - incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/8.5/Headers/'] + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] linklibs = ['tcl', 'tk'] libdirs = [] else: From noreply at buildbot.pypy.org Thu May 8 18:40:16 2014 From: noreply at 
buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:16 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew Message-ID: <20140508164016.401811C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71410:fecea0b0e138 Date: 2014-05-08 19:32 +0300 http://bitbucket.org/pypy/pypy/changeset/fecea0b0e138/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. startrev: b2cc67adbaad From noreply at buildbot.pypy.org Thu May 8 18:40:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:17 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: update whatsnew Message-ID: <20140508164017.599B01C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71411:37c8f69432ed Date: 2014-05-08 19:32 +0300 http://bitbucket.org/pypy/pypy/changeset/37c8f69432ed/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. 
startrev: b2cc67adbaad From noreply at buildbot.pypy.org Thu May 8 18:40:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:18 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: merge heads Message-ID: <20140508164018.739FA1C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: release-2.3.x Changeset: r71412:394146e9bb67 Date: 2014-05-08 19:36 +0300 http://bitbucket.org/pypy/pypy/changeset/394146e9bb67/ Log: merge heads diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -267,6 +269,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + 
set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + else: + set_winsz_opcode_pos = termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform From noreply at buildbot.pypy.org Thu May 8 18:40:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:19 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140508164019.9781F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71413:272404841063 Date: 2014-05-08 19:37 +0300 http://bitbucket.org/pypy/pypy/changeset/272404841063/ Log: merge heads diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. 
startrev: b2cc67adbaad From noreply at buildbot.pypy.org Thu May 8 18:40:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 18:40:20 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew Message-ID: <20140508164020.AF6A61C01DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r71414:e0ca4c047889 Date: 2014-05-08 19:27 +0300 http://bitbucket.org/pypy/pypy/changeset/e0ca4c047889/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. branch: tkinter_osx_packaging +OS/X specific header path From noreply at buildbot.pypy.org Thu May 8 20:04:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 8 May 2014 20:04:56 +0200 (CEST) Subject: [pypy-commit] pypy default: backout d492bd661190: this breaks a couple rpython tests Message-ID: <20140508180456.68C811D2B22@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71415:d4a4d951ddc2 Date: 2014-05-08 11:04 -0700 http://bitbucket.org/pypy/pypy/changeset/d4a4d951ddc2/ Log: backout d492bd661190: this breaks a couple rpython tests diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -331,8 +331,7 @@ ch2 = ord(s[pos]) # Check for low surrogate and combine the two to # form a UCS4 value - if ((allow_surrogates or MAXUNICODE < 65536) and - ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): + if ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF: ch3 = ((ch - 0xD800) << 10 | (ch2 - 0xDC00)) + 0x10000 pos += 1 _encodeUCS4(result, ch3) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -803,20 +803,3 @@ u, len(u), True) == r'\ud800\udc00' assert 
runicode.unicode_encode_raw_unicode_escape( u, len(u), True) == r'\ud800\udc00' - - def test_encode_surrogate_pair_utf8(self): - u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00) - if runicode.MAXUNICODE < 65536: - # Narrow unicode build, consider utf16 surrogate pairs - assert runicode.unicode_encode_utf_8( - u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' - assert runicode.unicode_encode_utf_8( - u, len(u), True, allow_surrogates=False) == '\xf0\x90\x80\x80' - else: - # Wide unicode build, merge utf16 surrogate pairs only when allowed - assert runicode.unicode_encode_utf_8( - u, len(u), True, allow_surrogates=True) == '\xf0\x90\x80\x80' - # Surrogates not merged, encoding fails. - py.test.raises( - UnicodeEncodeError, runicode.unicode_encode_utf_8, - u, len(u), True, allow_surrogates=False) From noreply at buildbot.pypy.org Thu May 8 20:24:50 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 8 May 2014 20:24:50 +0200 (CEST) Subject: [pypy-commit] pypy default: update list of 'important' builds Message-ID: <20140508182450.2D9D51C1041@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71416:036ae5f44a4b Date: 2014-05-08 21:22 +0300 http://bitbucket.org/pypy/pypy/changeset/036ae5f44a4b/ Log: update list of 'important' builds diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -20,11 +20,12 @@ 'own-linux-x86-32', 'own-linux-x86-64', 'own-linux-armhf', + 'own-win-x86-32', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', # 'pypy-c-stackless-app-level-linux-x86-32', - 'pypy-c-app-level-win-x86-32', +# 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', 'pypy-c-jit-linux-x86-64', 'pypy-c-jit-macosx-x86-64', From noreply at buildbot.pypy.org Thu May 8 23:08:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:08:27 +0200 (CEST) Subject: 
[pypy-commit] pypy default: make struct pack helper func Message-ID: <20140508210827.9AF661D2C01@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71417:e09a947e8997 Date: 2014-05-06 12:05 -0400 http://bitbucket.org/pypy/pypy/changeset/e09a947e8997/ Log: make struct pack helper func diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -21,11 +21,6 @@ return space.fromcache(Cache).error - at unwrap_spec(format=str) -def calcsize(space, format): - return space.wrap(_calcsize(space, format)) - - def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -38,7 +33,11 @@ @unwrap_spec(format=str) -def pack(space, format, args_w): +def calcsize(space, format): + return space.wrap(_calcsize(space, format)) + + +def _pack(space, format, args_w): if jit.isconstant(format): size = _calcsize(space, format) else: @@ -50,13 +49,18 @@ raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: raise OperationError(get_error(space), space.wrap(e.msg)) - return space.wrap(fmtiter.result.build()) + return fmtiter.result.build() + + + at unwrap_spec(format=str) +def pack(space, format, args_w): + return space.wrap(_pack(space, format, args_w)) # XXX inefficient @unwrap_spec(format=str, offset=int) def pack_into(space, format, w_buffer, offset, args_w): - res = pack(space, format, args_w).str_w(space) + res = _pack(space, format, args_w) buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() From noreply at buildbot.pypy.org Thu May 8 23:08:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:08:28 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_boolobject when run -A against cpython Message-ID: <20140508210828.DA7B01D2C01@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71418:cc7e428d4ab1 Date: 2014-05-08 16:54 -0400 
http://bitbucket.org/pypy/pypy/changeset/cc7e428d4ab1/ Log: fix test_boolobject when run -A against cpython diff --git a/pypy/objspace/std/test/test_boolobject.py b/pypy/objspace/std/test/test_boolobject.py --- a/pypy/objspace/std/test/test_boolobject.py +++ b/pypy/objspace/std/test/test_boolobject.py @@ -1,8 +1,4 @@ - - - class TestW_BoolObject: - def setup_method(self,method): self.true = self.space.w_True self.false = self.space.w_False @@ -29,6 +25,7 @@ def test_rbigint_w(self): assert self.space.bigint_w(self.true)._digits == [1] + class AppTestAppBoolTest: def test_bool_callable(self): assert True == bool(1) @@ -47,9 +44,9 @@ assert True.__int__() is 1 def test_bool_long(self): - assert long(True) is 1L - assert long(False) is 0L - assert True.__long__() is 1L + assert long(True) == 1L + assert long(False) == 0L + assert True.__long__() == 1L def test_bool_ops(self): assert True + True == 2 From noreply at buildbot.pypy.org Thu May 8 23:08:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:08:30 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix quotes in bytearray repr Message-ID: <20140508210830.274941D2C01@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71419:eefd0674de1d Date: 2014-05-08 17:05 -0400 http://bitbucket.org/pypy/pypy/changeset/eefd0674de1d/ Log: test/fix quotes in bytearray repr diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -221,7 +221,15 @@ # Good default if there are no replacements. 
buf = StringBuilder(len("bytearray(b'')") + len(s)) - buf.append("bytearray(b'") + buf.append("bytearray(b") + quote = "'" + for c in s: + if c == '"': + quote = "'" + break + elif c == "'": + quote = '"' + buf.append(quote) for i in range(len(s)): c = s[i] @@ -243,7 +251,8 @@ else: buf.append(c) - buf.append("')") + buf.append(quote) + buf.append(")") return space.wrap(buf.build()) diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -1,5 +1,6 @@ from pypy import conftest + class AppTestBytesArray: def setup_class(cls): cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) @@ -49,7 +50,10 @@ def test_repr(self): assert repr(bytearray()) == "bytearray(b'')" assert repr(bytearray('test')) == "bytearray(b'test')" - assert repr(bytearray("d'oh")) == r"bytearray(b'd\'oh')" + assert repr(bytearray("d'oh")) == r'bytearray(b"d\'oh")' + assert repr(bytearray('d"oh')) == 'bytearray(b\'d"oh\')' + assert repr(bytearray('d"\'oh')) == 'bytearray(b\'d"\\\'oh\')' + assert repr(bytearray('d\'"oh')) == 'bytearray(b\'d\\\'"oh\')' def test_str(self): assert str(bytearray()) == "" From noreply at buildbot.pypy.org Thu May 8 23:08:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:08:31 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140508210831.ADFA91D2C01@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71420:e6b55fa0713c Date: 2014-05-08 17:07 -0400 http://bitbucket.org/pypy/pypy/changeset/e6b55fa0713c/ Log: merge heads diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just 
before invoking the interactive interpreter. This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. 
include:: _ref.txt diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -1,5 +1,5 @@ ======================================= -PyPy 2.3 - Easier Than Ever +PyPy 2.3 - Terrestrial Arthropod Trap ======================================= We're pleased to announce PyPy 2.3, which targets version 2.7.6 of the Python diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -1,70 +1,77 @@ -====================== -Transactional Memory -====================== + +============================= +Software Transactional Memory +============================= .. contents:: This page is about ``pypy-stm``, a special in-development version of PyPy which can run multiple independent CPU-hungry threads in the same -process in parallel. It is side-stepping what is known in the Python -world as the "global interpreter lock (GIL)" problem. +process in parallel. It is a solution to what is known in the Python +world as the "global interpreter lock (GIL)" problem --- it is an +implementation of Python without the GIL. -"STM" stands for Software Transactional Memory, the technique used +"STM" stands for Software `Transactional Memory`_, the technique used internally. This page describes ``pypy-stm`` from the perspective of a user, describes work in progress, and finally gives references to more implementation details. -This work was done mostly by Remi Meier and Armin Rigo. Thanks to all -donors for crowd-funding the work so far! Please have a look at the -`2nd call for donation`_. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the `2nd call +for donation`_. +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory .. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. 
With caveats -listed below, it should be in theory within 25%-50% slower than a +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). -What you get in exchange for this slow-down is that ``pypy-stm`` runs -any multithreaded Python program on multiple CPUs at once. Programs -running two threads or more in parallel should ideally run faster than -in a regular PyPy, either now or soon as issues are fixed. In one way, -that's all there is to it: this is a GIL-less Python, feel free to -`download and try it`__. However, the deeper idea behind the -``pypy-stm`` project is to improve what is so far the state-of-the-art -for using multiple CPUs, which for cases where separate processes don't -work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is pushing forward an approach to *hide* the threads, as -described below in `atomic sections`_. +The benefit is that the resulting ``pypy-stm`` can execute multiple +threads of Python code in parallel. Programs running two threads or +more in parallel should ideally run faster than in a regular PyPy +(either now, or soon as bugs are fixed). +* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use + it as a drop-in replacement and multithreaded programs will run on + multiple cores. -.. __: +* ``pypy-stm`` does not impose any special API to the user, but it + provides a new pure Python module called `transactional_memory`_ with + features to inspect the state or debug conflicts_ that prevent + parallelization. This module can also be imported on top of a non-STM + PyPy or CPython. 
-Current status -============== +* Building on top of the way the GIL is removed, we will talk + about `Atomic sections, Transactions, etc.: a better way to write + parallel programs`_. + + +Getting Started +=============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu 12.04 binary -here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, -but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel, -in other words it is running a thread pool up to 4 threads emulating normal -threads. +interested in trying it out, you can download a Ubuntu binary here__ +(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are +release mode, but not stripped of debug symbols). The current version +supports four "segments", which means that it will run up to four +threads in parallel. To build a version from sources, you first need to compile a custom -version of clang; we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +version of clang(!); we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 `` for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for the very extensive usage that pypy-stm does of a -clang-only feature (without them, you get crashes of clang). Then get +they are fixes for a clang-only feature that hasn't been used so heavily +in the past (without the patches, you get crashes of clang). Then get the branch `stmgc-c7`_ of PyPy and run:: rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py @@ -75,23 +82,26 @@ .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ -Caveats: +.. _caveats: -* So far, small examples work fine, but there are still a number of - bugs. 
We're busy fixing them. +Current status +-------------- + +* So far, small examples work fine, but there are still a few bugs. + We're busy fixing them as we find them; feel free to `report bugs`_. * Currently limited to 1.5 GB of RAM (this is just a parameter in - `core.h`__). Memory overflows are not detected correctly, so may - cause segmentation faults. + `core.h`__). Memory overflows are not correctly handled; they cause + segfaults. -* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, - which is "only" bad). Moreover, you should run it with a command like - ``pypy-stm --jit trace_limit=60000 args...``; the default value of - 6000 for ``trace_limit`` is currently too low (6000 should become - reasonable again as we improve). Also, in order to produce machine - code, the JIT needs to enter a special single-threaded mode for now. - This all means that you *will* get very bad performance results if - your program doesn't run for *many* seconds for now. +* The JIT warm-up time improved recently but is still bad. In order to + produce machine code, the JIT needs to enter a special single-threaded + mode for now. This means that you will get bad performance results if + your program doesn't run for several seconds, where *several* can mean + *many.* When trying benchmarks, be sure to check that you have + reached the warmed state, i.e. the performance is not improving any + more. This should be clear from the fact that as long as it's + producing more machine code, ``pypy-stm`` will run on a single core. * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large @@ -108,111 +118,197 @@ * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in JIT-generated machine code). But the overall bookkeeping logic could - see more improvements (see Statistics_ below). 
- -* You can use `atomic sections`_, but the most visible missing thing is - that you don't get reports about the "conflicts" you get. This would - be the first thing that you need in order to start using atomic - sections more extensively. Also, for now: for better results, try to - explicitly force a transaction break just before (and possibly after) - each large atomic section, with ``time.sleep(0)``. + see more improvements (see `Low-level statistics`_ below). * Forking the process is slow because the complete memory needs to be - copied manually right now. + copied manually. A warning is printed to this effect. -* Very long-running processes should eventually crash on an assertion - error because of a non-implemented overflow of an internal 29-bit - number, but this requires at the very least ten hours --- more - probably, several days or more. +* Very long-running processes (on the order of days) will eventually + crash on an assertion error because of a non-implemented overflow of + an internal 29-bit number. .. _`report bugs`: https://bugs.pypy.org/ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h -Statistics +User Guide ========== + -When a non-main thread finishes, you get statistics printed to stderr, -looking like that:: +Drop-in replacement +------------------- - thread 0x7f73377fe600: - outside transaction 42182 0.506 s - run current 85466 0.000 s - run committed 34262 3.178 s - run aborted write write 6982 0.083 s - run aborted write read 550 0.005 s - run aborted inevitable 388 0.010 s - run aborted other 0 0.000 s - wait free segment 0 0.000 s - wait write read 78 0.027 s - wait inevitable 887 0.490 s - wait other 0 0.000 s - bookkeeping 51418 0.606 s - minor gc 162970 1.135 s - major gc 1 0.019 s - sync pause 59173 1.738 s - spin loop 129512 0.094 s +Multithreaded, CPU-intensive Python programs should work unchanged on +``pypy-stm``. They will run using multiple CPU cores in parallel. 
-The first number is a counter; the second number gives the associated -time (the amount of real time that the thread was in this state; the sum -of all the times should be equal to the total time between the thread's -start and the thread's end). The most important points are "run -committed", which gives the amount of useful work, and "outside -transaction", which should give the time spent e.g. in library calls -(right now it seems to be a bit larger than that; to investigate). -Everything else is overhead of various forms. (Short-, medium- and -long-term future work involves reducing this overhead :-) +The existing semantics of the GIL (Global Interpreter Lock) are +unchanged: although running on multiple cores in parallel, ``pypy-stm`` +gives the illusion that threads are run serially, with switches only +occurring between bytecodes, not in the middle of them. Programs can +rely on this: using ``shared_list.append()/pop()`` or +``shared_dict.setdefault()`` as synchronization mecanisms continues to +work as expected. -These statistics are not printed out for the main thread, for now. +This works by internally considering the points where a standard PyPy or +CPython would release the GIL, and replacing them with the boundaries of +"transaction". Like their database equivalent, multiple transactions +can execute in parallel, but will commit in some serial order. They +appear to behave as if they were completely run in this serialization +order. Atomic sections -=============== +--------------- -While one of the goal of pypy-stm is to give a GIL-free but otherwise -unmodified Python, the other goal is to push for a better way to use -multithreading. For this, you (as the Python programmer) get an API -in the ``__pypy__.thread`` submodule: +PyPy supports *atomic sections,* which are blocks of code which you want +to execute without "releasing the GIL". 
*This is experimental and may +be removed in the future.* In STM terms, this means blocks of code that +are executed while guaranteeing that the transaction is not interrupted +in the middle. -* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in - a ``with __pypy__.thread.atomic:`` statement). It runs the whole - block of code without breaking the current transaction --- from - the point of view of a regular CPython/PyPy, this is equivalent to - saying that the GIL will not be released at all between the start and - the end of this block of code. +Here is a usage example:: -The obvious usage is to use atomic blocks in the same way as one would -use locks: to protect changes to some shared data, you do them in a -``with atomic`` block, just like you would otherwise do them in a ``with -mylock`` block after ``mylock = thread.allocate_lock()``. This allows -you not to care about acquiring the correct locks in the correct order; -it is equivalent to having only one global lock. This is how -transactional memory is `generally described`__: as a way to efficiently -execute such atomic blocks, running them in parallel while giving the -illusion that they run in some serial order. + with __pypy__.thread.atomic: + assert len(lst1) == 10 + x = lst1.pop(0) + lst1.append(x) -.. __: http://en.wikipedia.org/wiki/Transactional_memory +In this (bad) example, we are sure that the item popped off one end of +the list is appened again at the other end atomically. It means that +another thread can run ``len(lst1)`` or ``x in lst1`` without any +particular synchronization, and always see the same results, +respectively ``10`` and ``True``. It will never see the intermediate +state where ``lst1`` only contains 9 elements. Atomic sections are +similar to re-entrant locks (they can be nested), but additionally they +protect against the concurrent execution of *any* code instead of just +code that happens to be protected by the same lock in other threads. 
-However, the less obvious intended usage of atomic sections is as a -wide-ranging replacement of explicit threads. You can turn a program -that is not multi-threaded at all into a program that uses threads -internally, together with large atomic sections to keep the behavior -unchanged. This capability can be hidden in a library or in the -framework you use; the end user's code does not need to be explicitly -aware of using threads. For a simple example of this, see -`transaction.py`_ in ``lib_pypy``. The idea is that if you have a -program where the function ``f(key, value)`` runs on every item of some -big dictionary, you can replace the loop with:: +Note that the notion of atomic sections is very strong. If you write +code like this:: + + with __pypy__.thread.atomic: + time.sleep(10) + +then, if you think about it as if we had a GIL, you are executing a +10-seconds-long atomic transaction without releasing the GIL at all. +This prevents all other threads from progressing at all. While it is +not strictly true in ``pypy-stm``, the exact rules for when other +threads can progress or not are rather complicated; you have to consider +it likely that such a piece of code will eventually block all other +threads anyway. + +Note that if you want to experiment with ``atomic``, you may have to add +manually a transaction break just before the atomic block. This is +because the boundaries of the block are not guaranteed to be the +boundaries of the transaction: the latter is at least as big as the +block, but maybe bigger. Therefore, if you run a big atomic block, it +is a good idea to break the transaction just before. This can be done +e.g. by the hack of calling ``time.sleep(0)``. (This may be fixed at +some point.) + +There are also issues with the interaction of locks and atomic blocks. +This can be seen if you write to files (which have locks), including +with a ``print`` to standard output. 
If one thread tries to acquire a +lock while running in an atomic block, and another thread has got the +same lock, then the former may fail with a ``thread.error``. The reason +is that "waiting" for some condition to become true --while running in +an atomic block-- does not really make sense. For now you can work +around it by making sure that, say, all your prints are either in an +``atomic`` block or none of them are. (This kind of issue is +theoretically hard to solve.) + + +Locks +----- + +**Not Implemented Yet** + +The thread module's locks have their basic semantic unchanged. However, +using them (e.g. in ``with my_lock:`` blocks) starts an alternative +running mode, called `Software lock elision`_. This means that PyPy +will try to make sure that the transaction extends until the point where +the lock is released, and if it succeeds, then the acquiring and +releasing of the lock will be "elided". This means that in this case, +the whole transaction will technically not cause any write into the lock +object --- it was unacquired before, and is still unacquired after the +transaction. + +This is specially useful if two threads run ``with my_lock:`` blocks +with the same lock. If they each run a transaction that is long enough +to contain the whole block, then all writes into the lock will be elided +and the two transactions will not conflict with each other. As usual, +they will be serialized in some order: one of the two will appear to run +before the other. Simply, each of them executes an "acquire" followed +by a "release" in the same transaction. As explained above, the lock +state goes from "unacquired" to "unacquired" and can thus be left +unchanged. + +This approach can gracefully fail: unlike atomic sections, there is no +guarantee that the transaction runs until the end of the block. If you +perform any input/output while you hold the lock, the transaction will +end as usual just before the input/output operation. 
If this occurs, +then the lock elision mode is cancelled and the lock's "acquired" state +is really written. + +Even if the lock is really acquired already, a transaction doesn't have +to wait for it to become free again. It can enter the elision-mode anyway +and tentatively execute the content of the block. It is only at the end, +when trying to commit, that the thread will pause. As soon as the real +value stored in the lock is switched back to "unacquired", it can then +proceed and attempt to commit its already-executed transaction (which +can fail and abort and restart from the scratch, as usual). + +Note that this is all *not implemented yet,* but we expect it to work +even if you acquire and release several locks. The elision-mode +transaction will extend until the first lock you acquired is released, +or until the code performs an input/output or a wait operation (for +example, waiting for another lock that is currently not free). In the +common case of acquiring several locks in nested order, they will all be +elided by the same transaction. + +.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410 + + +Atomic sections, Transactions, etc.: a better way to write parallel programs +---------------------------------------------------------------------------- + +(This section is based on locks as we plan to implement them, but also +works with the existing atomic sections.) + +In the cases where elision works, the block of code can run in parallel +with other blocks of code *even if they are protected by the same lock.* +You still get the illusion that the blocks are run sequentially. This +works even for multiple threads that run each a series of such blocks +and nothing else, protected by one single global lock. 
This is +basically the Python application-level equivalent of what was done with +the interpreter in ``pypy-stm``: while you think you are writing +thread-unfriendly code because of this global lock, actually the +underlying system is able to make it run on multiple cores anyway. + +This capability can be hidden in a library or in the framework you use; +the end user's code does not need to be explicitly aware of using +threads. For a simple example of this, there is `transaction.py`_ in +``lib_pypy``. The idea is that you write, or already have, some program +where the function ``f(key, value)`` runs on every item of some big +dictionary, say:: + + for key, value in bigdict.items(): + f(key, value) + +Then you simply replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) transaction.run() This code runs the various calls to ``f(key, value)`` using a thread -pool, but every single call is done in an atomic section. The end -result is that the behavior should be exactly equivalent: you don't get -any extra multithreading issue. +pool, but every single call is executed under the protection of a unique +lock. The end result is that the behavior is exactly equivalent --- in +fact it makes little sense to do it in this way on a non-STM PyPy or on +CPython. But on ``pypy-stm``, the various locked calls to ``f(key, +value)`` can tentatively be executed in parallel, even if the observable +result is as if they were executed in some serial order. This approach hides the notion of threads from the end programmer, including all the hard multithreading-related issues. This is not the @@ -223,41 +319,176 @@ only requires that the end programmer identifies where this parallelism is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. - + .. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. 
_OpenMP: http://en.wikipedia.org/wiki/OpenMP -================== -Other APIs in pypy-stm: +.. _`transactional_memory`: -* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" - in this pypy-stm. This is the limit above which more threads will not - be able to execute on more cores. (Right now it is limited to 4 due - to inter-segment overhead, but should be increased in the future. It +API of transactional_memory +--------------------------- + +The new pure Python module ``transactional_memory`` runs on both CPython +and PyPy, both with and without STM. It contains: + +* ``getsegmentlimit()``: return the number of "segments" in + this pypy-stm. This is the limit above which more threads will not be + able to execute on more cores. (Right now it is limited to 4 due to + inter-segment overhead, but should be increased in the future. It should also be settable, and the default value should depend on the - number of actual CPUs.) + number of actual CPUs.) If STM is not available, this returns 1. -* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but - raises an exception if you attempt to nest it inside another - ``atomic``. +* ``print_abort_info(minimum_time=0.0)``: debugging help. Each thread + remembers the longest abort or pause it did because of cross-thread + contention_. This function prints it to ``stderr`` if the time lost + is greater than ``minimum_time`` seconds. The record is then + cleared, to make it ready for new events. This function returns + ``True`` if it printed a report, and ``False`` otherwise. -* ``__pypy__.thread.signals_enabled``: a context manager that runs - its block with signals enabled. By default, signals are only - enabled in the main thread; a non-main thread will not receive - signals (this is like CPython). Enabling signals in non-main threads - is useful for libraries where threads are hidden and the end user is - not expecting his code to run elsewhere than in the main thread. 
-Note that all of this API is (or will be) implemented in a regular PyPy -too: for example, ``with atomic`` will simply mean "don't release the -GIL" and ``getsegmentlimit()`` will return 1. +API of __pypy__.thread +---------------------- -================== +The ``__pypy__.thread`` submodule is a built-in module of PyPy that +contains a few internal built-in functions used by the +``transactional_memory`` module, plus the following: + +* ``__pypy__.thread.atomic``: a context manager to run a block in + fully atomic mode, without "releasing the GIL". (May be eventually + removed?) + +* ``__pypy__.thread.signals_enabled``: a context manager that runs its + block with signals enabled. By default, signals are only enabled in + the main thread; a non-main thread will not receive signals (this is + like CPython). Enabling signals in non-main threads is useful for + libraries where threads are hidden and the end user is not expecting + his code to run elsewhere than in the main thread. + + +.. _contention: + +Conflicts +--------- + +Based on Software Transactional Memory, the ``pypy-stm`` solution is +prone to "conflicts". To repeat the basic idea, threads execute their code +speculatively, and at known points (e.g. between bytecodes) they +coordinate with each other to agree on which order their respective +actions should be "committed", i.e. become globally visible. Each +duration of time between two commit-points is called a transaction. + +A conflict occurs when there is no consistent ordering. The classical +example is if two threads both tried to change the value of the same +global variable. In that case, only one of them can be allowed to +proceed, and the other one must be either paused or aborted (restarting +the transaction). If this occurs too often, parallelization fails. + +How much actual parallelization a multithreaded program can see is a bit +subtle. 
Basically, a program not using ``__pypy__.thread.atomic`` or +eliding locks, or doing so for very short amounts of time, will +parallelize almost freely (as long as it's not some artificial example +where, say, all threads try to increase the same global counter and do +nothing else). + +However, using if the program requires longer transactions, it comes +with less obvious rules. The exact details may vary from version to +version, too, until they are a bit more stabilized. Here is an +overview. + +Parallelization works as long as two principles are respected. The +first one is that the transactions must not *conflict* with each other. +The most obvious sources of conflicts are threads that all increment a +global shared counter, or that all store the result of their +computations into the same list --- or, more subtly, that all ``pop()`` +the work to do from the same list, because that is also a mutation of +the list. (It is expected that some STM-aware library will eventually +be designed to help with conflict problems, like a STM-aware queue.) + +A conflict occurs as follows: when a transaction commits (i.e. finishes +successfully) it may cause other transactions that are still in progress +to abort and retry. This is a waste of CPU time, but even in the worst +case senario it is not worse than a GIL, because at least one +transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and +1 CPU doing a job that commits successfully). + +Conflicts do occur, of course, and it is pointless to try to avoid them +all. For example they can be abundant during some warm-up phase. What +is important is to keep them rare enough in total. + +Another issue is that of avoiding long-running so-called "inevitable" +transactions ("inevitable" is taken in the sense of "which cannot be +avoided", i.e. transactions which cannot abort any more). Transactions +like that should only occur if you use ``__pypy__.thread.atomic``, +generally become of I/O in atomic blocks. 
They work, but the +transaction is turned inevitable before the I/O is performed. For all +the remaining execution time of the atomic block, they will impede +parallel work. The best is to organize the code so that such operations +are done completely outside ``__pypy__.thread.atomic``. + +(This is related to the fact that blocking I/O operations are +discouraged with Twisted, and if you really need them, you should do +them on their own separate thread.) + +In case of lock elision, we don't get long-running inevitable +transactions, but a different problem can occur: doing I/O cancels lock +elision, and the lock turns into a real lock, preventing other threads +from committing if they also need this lock. (More about it when lock +elision is implemented and tested.) + + + +Implementation +============== + +XXX this section mostly empty for now + + +Low-level statistics +-------------------- + +When a non-main thread finishes, you get low-level statistics printed to +stderr, looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + sync commit soon 1 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + longest recordered marker 0.000826 s + "File "x.py", line 5, in f" + +On each line, the first number is a counter, and the second number gives +the associated time --- the amount of real time that the thread was in +this state. The sum of all the times should be equal to the total time +between the thread's start and the thread's end. 
The most important +points are "run committed", which gives the amount of useful work, and +"outside transaction", which should give the time spent e.g. in library +calls (right now it seems to be larger than that; to investigate). The +various "run aborted" and "wait" entries are time lost due to +conflicts_. Everything else is overhead of various forms. (Short-, +medium- and long-term future work involves reducing this overhead :-) + +The last two lines are special; they are an internal marker read by +``transactional_memory.print_abort_info()``. + +These statistics are not printed out for the main thread, for now. Reference to implementation details -=================================== +----------------------------------- The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more @@ -282,3 +513,15 @@ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py + + + +See also +======== + +See also +https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst +(section about STM). + + +.. include:: _ref.txt diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. branch: tkinter_osx_packaging +OS/X specific header path diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. 
startrev: b2cc67adbaad diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,6 +7,11 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION +from lib_pypy._pypy_interact import irc_header + +is_release = PYPY_VERSION[3] == "final" + banner = sys.version.splitlines()[0] @@ -241,6 +246,10 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt + if is_release: + assert irc_header not in child.before + else: + assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = 
str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -267,6 +269,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + else: + set_winsz_opcode_pos = termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -195,4 +195,5 @@ import operator assert operator.index(42) == 42 assert operator.__index__(42) == 42 - raises(TypeError, operator.index, "abc") + exc = raises(TypeError, operator.index, "abc") + assert str(exc.value) == "'str' object cannot be interpreted as an index" diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -794,13 +794,18 @@ l = ["space.isinstance_w(w_result, %s)" % x for x in checkerspec] checker = " or ".join(l) + if targetname == 'index': + msg = "'%%T' object cannot be interpreted as an index" + else: + msg = "unsupported operand type for %(targetname)s(): '%%T'" + msg = msg % locals() source = """if 1: def %(targetname)s(space, w_obj): w_impl = space.lookup(w_obj, %(specialname)r) if w_impl is None: raise 
oefmt(space.w_TypeError, - "unsupported operand type for %(targetname)s(): " - "'%%T'", w_obj) + %(msg)r, + w_obj) w_result = space.get_and_call_function(w_impl, w_obj) if %(checker)s: diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -20,11 +20,12 @@ 'own-linux-x86-32', 'own-linux-x86-64', 'own-linux-armhf', + 'own-win-x86-32', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', # 'pypy-c-stackless-app-level-linux-x86-32', - 'pypy-c-app-level-win-x86-32', +# 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', 'pypy-c-jit-linux-x86-64', 'pypy-c-jit-macosx-x86-64', diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -17,13 +17,8 @@ if sys.platform.startswith("linux"): DEFL_ROOTFINDER_WITHJIT = "asmgcc" - ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] -elif compiler.name == 'msvc': - DEFL_ROOTFINDER_WITHJIT = "shadowstack" - ROOTFINDERS = ["n/a", "shadowstack"] else: DEFL_ROOTFINDER_WITHJIT = "shadowstack" - ROOTFINDERS = ["n/a", "shadowstack", "asmgcc"] IS_64_BITS = sys.maxint > 2147483647 @@ -91,7 +86,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ROOTFINDERS, + ["n/a", "shadowstack", "asmgcc"], "shadowstack", cmdline="--gcrootfinder", requires={ @@ -372,9 +367,10 @@ # if we have specified strange inconsistent settings. 
config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X + # disallow asmgcc on OS/X and on Win32 if config.translation.gcrootfinder == "asmgcc": - assert sys.platform != "darwin" + assert sys.platform != "darwin", "'asmgcc' not supported on OS/X" + assert sys.platform != "win32", "'asmgcc' not supported on Win32" # ---------------------------------------------------------------- diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -296,10 +296,11 @@ # trim: instructions with no framesize are removed from self.insns, # and from the 'previous_insns' lists - assert hasattr(self.insns[0], 'framesize') - old = self.insns[1:] - del self.insns[1:] - for insn in old: + if 0: # <- XXX disabled because it seems bogus, investigate more + assert hasattr(self.insns[0], 'framesize') + old = self.insns[1:] + del self.insns[1:] + for insn in old: if hasattr(insn, 'framesize'): self.insns.append(insn) insn.previous_insns = [previnsn for previnsn in insn.previous_insns diff --git a/rpython/translator/c/src/asm.c b/rpython/translator/c/src/asm.c --- a/rpython/translator/c/src/asm.c +++ b/rpython/translator/c/src/asm.c @@ -12,6 +12,6 @@ # include "src/asm_ppc.c" #endif -#if defined(MS_WINDOWS) && defined(_MSC_VER) +#if defined(_MSC_VER) # include "src/asm_msvc.c" #endif diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Thu May 8 23:37:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:37:00 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix parsing of spaces between nums in complex constructor Message-ID: 
<20140508213700.7082C1C0685@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71421:84388c3ff677 Date: 2014-05-08 17:14 -0400 http://bitbucket.org/pypy/pypy/changeset/84388c3ff677/ Log: test/fix parsing of spaces between nums in complex constructor diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -50,10 +50,6 @@ realstop = i - # ignore whitespace - while i < slen and s[i] == ' ': - i += 1 - # return appropriate strings is only one number is there if i >= slen: newstop = realstop - 1 diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -82,9 +82,7 @@ class AppTestAppComplexTest: - spaceconfig = { - "usemodules": ["binascii", "rctime"] - } + spaceconfig = {"usemodules": ["binascii", "rctime"]} def w_check_div(self, x, y): """Compute complex z=x*y, and check that z/x==y and z/y==x.""" @@ -383,7 +381,6 @@ # assert cmath.polar(1) == (1.0, 0.0) raises(TypeError, "cmath.polar(Obj(1))") - def test_hash(self): for x in xrange(-30, 30): @@ -403,7 +400,9 @@ assert j(100 + 0j) == 100 + 0j assert isinstance(j(100), j) assert j(100L + 0j) == 100 + 0j - assert j("100 + 0j") == 100 + 0j + assert j("100+0j") == 100 + 0j + exc = raises(ValueError, j, "100 + 0j") + assert str(exc.value) == "complex() arg is a malformed string" x = j(1+0j) x.foo = 42 assert x.foo == 42 From noreply at buildbot.pypy.org Thu May 8 23:37:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 8 May 2014 23:37:01 +0200 (CEST) Subject: [pypy-commit] pypy default: more cleanups for complex parsing str Message-ID: <20140508213701.9D2831C0685@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71422:0cecb9c518a9 Date: 2014-05-08 17:36 -0400 http://bitbucket.org/pypy/pypy/changeset/0cecb9c518a9/ 
Log: more cleanups for complex parsing str diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -38,12 +38,14 @@ # ignore whitespace after bracket while i < slen and s[i] == ' ': i += 1 + while slen > 0 and s[slen-1] == ' ': + slen -= 1 # extract first number realstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: + if s[i] in ('+', '-') and pc not in ('e', 'E') and i != realstart: break pc = s[i] i += 1 @@ -71,10 +73,10 @@ # find sign for imaginary part if s[i] == '-' or s[i] == '+': imagsign = s[i] - if imagsign == ' ': + else: raise ValueError - i+=1 + i += 1 # whitespace while i < slen and s[i] == ' ': i += 1 @@ -84,7 +86,7 @@ imagstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E'): + if s[i] in ('+', '-') and pc not in ('e', 'E'): break pc = s[i] i += 1 @@ -92,14 +94,12 @@ imagstop = i - 1 if imagstop < 0: raise ValueError - if s[imagstop] not in ('j','J'): + if s[imagstop] not in ('j', 'J'): raise ValueError if imagstop < imagstart: raise ValueError - while i Author: Brian Kearns Branch: Changeset: r71423:2b4f2f6ed112 Date: 2014-05-08 17:43 -0400 http://bitbucket.org/pypy/pypy/changeset/2b4f2f6ed112/ Log: reject whitespace here too diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -77,9 +77,6 @@ raise ValueError i += 1 - # whitespace - while i < slen and s[i] == ' ': - i += 1 if i >= slen: raise ValueError diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -38,7 +38,6 @@ test_cparse('(1+2j)', '1', '2') test_cparse('(1-6j)', '1', '-6') test_cparse(' ( +3.14-6J )', '+3.14', 
'-6') - test_cparse(' ( +3.14- 6J ) ', '+3.14', '-6') test_cparse(' +J', '0.0', '1.0') test_cparse(' -J', '0.0', '-1.0') @@ -307,6 +306,8 @@ assert self.almost_equal(complex("-1"), -1) assert self.almost_equal(complex("+1"), +1) assert self.almost_equal(complex(" ( +3.14-6J ) "), 3.14-6j) + exc = raises(ValueError, complex, " ( +3.14- 6J ) ") + assert str(exc.value) == "complex() arg is a malformed string" class complex2(complex): pass From noreply at buildbot.pypy.org Fri May 9 00:32:25 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 00:32:25 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Added tag release-2.3 for changeset 394146e9bb67 Message-ID: <20140508223225.114D41C066C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71424:2c73362455f7 Date: 2014-05-09 01:23 +0300 http://bitbucket.org/pypy/pypy/changeset/2c73362455f7/ Log: Added tag release-2.3 for changeset 394146e9bb67 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -9,3 +9,4 @@ 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 From noreply at buildbot.pypy.org Fri May 9 00:39:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 9 May 2014 00:39:17 +0200 (CEST) Subject: [pypy-commit] pypy default: merge release-2.3.x Message-ID: <20140508223917.918301C066C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71425:181fef0fd807 Date: 2014-05-08 18:35 -0400 http://bitbucket.org/pypy/pypy/changeset/181fef0fd807/ Log: merge release-2.3.x diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,7 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 
release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.1`_: the latest official release +* `Release 2.3.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -113,6 +113,11 @@ generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. +``PYPY_IRC_TOPIC`` + If set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. + + .. include:: ../gc_info.rst :start-line: 7 diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -35,6 +35,9 @@ PYTHONPATH : %r-separated list of directories prefixed to the default module search path. The result is sys.path. PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. 
+PYPY_IRC_TOPIC: if set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. +PYPYLOG: If set to a non-empty value, enable logging. """ import sys @@ -668,7 +671,9 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - success = run_toplevel(interactive_console, mainmodule) + irc_topic = readenv and os.getenv('PYPY_IRC_TOPIC') + success = run_toplevel(interactive_console, mainmodule, + quiet=not irc_topic) except SystemExit, e: status = e.code else: diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.6" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0-alpha0" +#define PYPY_VERSION "2.3.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 3, 0, "final", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Fri May 9 00:39:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 9 May 2014 00:39:18 +0200 (CEST) Subject: [pypy-commit] pypy default: update versions to 2.4.0-alpha0 Message-ID: <20140508223918.EA9AC1C066C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71426:f556d32f8319 Date: 2014-05-08 18:36 -0400 http://bitbucket.org/pypy/pypy/changeset/f556d32f8319/ Log: update versions to 2.4.0-alpha0 diff --git a/pypy/module/cpyext/include/patchlevel.h 
b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.6" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0" +#define PYPY_VERSION "2.4.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 4, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Fri May 9 00:44:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 9 May 2014 00:44:36 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20140508224436.1D4D91C066C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71427:0125c920eee7 Date: 2014-05-08 18:44 -0400 http://bitbucket.org/pypy/pypy/changeset/0125c920eee7/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: b2cc67adbaad +.. 
startrev: f556d32f8319 From noreply at buildbot.pypy.org Fri May 9 08:26:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:14 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: release 2.3 Message-ID: <20140509062614.832171C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r494:9cfa715fc628 Date: 2014-05-09 02:22 +0300 http://bitbucket.org/pypy/pypy.org/changeset/9cfa715fc628/ Log: release 2.3 diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -3,13 +3,14 @@ title: Python compatibility --- -PyPy implements the Python language version 2.7.3. It supports all of the core +PyPy implements the Python language version 2.7.6. It supports all of the core language, passing Python test suite (with minor modifications that were already accepted in the main python in newer versions). It supports most of the commonly used Python `standard library modules`_; details below. PyPy has **alpha/beta-level** support for the `CPython C API`_, however, as of -2.2 release this feature is not yet complete. Many libraries will require +2.3 release this feature is not yet complete. We strongly advise use of `CFFI`_ +instead. CFFI come builtin with PyPy. Many libraries will require a bit of effort to work, but there are known success stories. Check out PyPy blog for updates, as well as the `Compatibility Wiki`__. @@ -98,6 +99,7 @@ A more complete list is available at `our dev site`_. .. _`CPython C API`: http://docs.python.org/c-api/ +.. _`CFFI`: http://cffi.readthedocs.org/ .. _`standard library modules`: http://docs.python.org/library/ .. _`our dev site`: http://pypy.readthedocs.org/en/latest/cpython_differences.html .. 
_`more details here`: http://pypy.readthedocs.org/en/latest/cpython_differences.html#differences-related-to-garbage-collection-strategies diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -14,14 +14,14 @@ (but not the release) are slightly miscompiled due to buildslave being old. Contributions are welcomed**. -Here are the binaries for the current release — **PyPy 2.2.1** — -(`what's new in PyPy 2.2.1?`_ `what's new in PyPy 2.2?`_) -for x86 and ARM Linux, Mac OS/X, Windows and the older release — **PyPy3 2.1 beta1** — (`what's new in -PyPy3 2.1 beta1?`_). +Here are the binaries for the current release — **PyPy 2.3** — +(`what's new in PyPy 2.3`_ ) + +for x86 and ARM Linux, Mac OS/X, Windows and the older release — **PyPy3 2.1 beta1** — +(`what's new in PyPy3 2.1 beta1?`_). .. _what's new in PyPy3 2.1 beta1?: http://doc.pypy.org/en/latest/release-pypy3-2.1.0-beta1.html -.. _what's new in PyPy 2.2?: http://doc.pypy.org/en/latest/release-2.2.0.html -.. _what's new in PyPy 2.2.1?: http://doc.pypy.org/en/latest/release-2.2.1.html +.. _what's new in PyPy 2.3?: http://doc.pypy.org/en/latest/release-2.3.0.html .. class:: download_menu @@ -47,11 +47,11 @@ x86 CPUs that have the SSE2_ instruction set (most of them do, nowadays), or on x86-64 CPUs. They also contain `stackless`_ extensions, like `greenlets`_. -(This is the official release 2.2.1; +(This is the official release 2.3; for the most up-to-date version see below.) -2.2.1 ------ +2.3 +--- Note that Linux binaries are dynamically linked, as is usual, and thus might not be usable due to the sad story of linux binary compatibility. This means @@ -86,16 +86,16 @@ * `All our downloads,`__ including previous versions. We also have a mirror_, but please use only if you have troubles accessing the links above -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-linux.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-linux64.tar.bz2 -.. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-linux-armhf-raspbian.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-linux-armhf-raring.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-linux-armel.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-win32.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-linux.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-linux64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-linux-armhf-raspbian.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-linux-armhf-raring.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-linux-armel.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-osx64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-win32.zip .. _`VS 2008 runtime library installer vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-src.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-src.zip +.. __: https://bitbucket.org/pypy/pypy/get/release-2.3.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/get/release-2.3.zip .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/ @@ -179,7 +179,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in ``/opt``, and if you want, put a symlink from somewhere like -``/usr/local/bin/pypy`` to ``/path/to/pypy-2.2.1/bin/pypy``. Do +``/usr/local/bin/pypy`` to ``/path/to/pypy-2.3/bin/pypy``. Do not move or copy the executable ``pypy`` outside the tree --- put a symlink to it, otherwise it will not find its libraries. @@ -226,11 +226,11 @@ 1. Get the source code. 
The following packages contain the source at the same revision as the above binaries: - * `pypy-2.2.1-src.tar.bz2`__ (sources, Unix line endings) - * `pypy-2.2.1-src.zip`__ (sources, Unix line endings too, sorry) + * `pypy-2.3-src.tar.bz2`__ (sources, Unix line endings) + * `pypy-2.3-src.zip`__ (sources, Unix line endings too, sorry) - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-src.tar.bz2 - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.2.1-src.zip + .. __: https://bitbucket.org/pypy/pypy/get/release-2.3.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/get/release-2.3.zip Or you can checkout the current trunk using Mercurial_ (the trunk usually works and is of course more up-to-date):: @@ -308,15 +308,15 @@ Here are the checksums for each of the downloads (md5 and sha1):: - 0127094c36f985f6c2b4ed82be3fb6c8 pypy-2.2.1-linux-armel.tar.bz2 - 278e0ceb961bd0f49af53a01301601d4 pypy-2.2.1-linux-armhf-raring.tar.bz2 - dcb1803b109a43b3a5b166bb1f7c4736 pypy-2.2.1-linux-armhf-raspbian.tar.bz2 - 86ef92a13faf309f4f413e35c01afcab pypy-2.2.1-linux.tar.bz2 - 96732d12957fa3c8e58ded2939631e5f pypy-2.2.1-linux64.tar.bz2 - 7ec9f48702323f9e93654ba73dd46720 pypy-2.2.1-src.tar.bz2 - dec735b2a0a0e9655a4a89c77ec56dcf pypy-2.2.1-src.zip - 213aacf7a06cd531c3457f1c5a73059c pypy-2.2.1-win32.zip - 040cd25e95f62242c41e9d4e1e1c1166 pypy-2.2.1-osx64.tar.bz2 + 391bda03d0642ebb3fa6d59ec90a1388 pypy-2.3-linux-armel.tar.bz2 + cdb738317e958031d0752dff5a1742f1 pypy-2.3-linux-armhf-raring.tar.bz2 + 0ab5df88c02b41f8f062b2893ccd5066 pypy-2.3-linux-armhf-raspbian.tar.bz2 + e84a1179a63632c62e311363a409df56 pypy-2.3-linux.tar.bz2 + 2ae65fa6b0ea9ddffd50a02a40f27186 pypy-2.3-linux64.tar.bz2 + c93a8e47f3b3109af2f66d2bd766eb97 pypy-2.3-src.tar.bz2 + a0253c8c072207c22f4bab97f8826966 pypy-2.3-src.zip + 55e0598ca9add0e7c78ffa49db6476c2 pypy-2.3-win32.zip + 76b5d7798d3a9b8919f792df5a402bb2 pypy-2.3-osx64.tar.bz2 f6adca4d26f34bef9903cc5347c7d688 pypy3-2.1-beta1-linux64.tar.bz2 
d57d0d0d3c49c7cce75440924d8f66b7 pypy3-2.1-beta1-linux-armel.tar.bz2 55b82b199ccf537c7ea5e2f31df78dfe pypy3-2.1-beta1-linux-armhf-raring.tar.bz2 @@ -329,15 +329,15 @@ 2c9f0054f3b93a6473f10be35277825a pypy-1.8-sandbox-linux64.tar.bz2 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2 - 5df9cb5348da032cc1b8190101266be41f969bb3 pypy-2.2.1-linux-armel.tar.bz2 - f266f0f5a60db6fcdd4aa2c44aa20bf710736c94 pypy-2.2.1-linux-armhf-raring.tar.bz2 - 8cd77593762c6f4abaa91278590e927db7c08168 pypy-2.2.1-linux-armhf-raspbian.tar.bz2 - 287bca3f6b6bcc83453317f38777cb825b964902 pypy-2.2.1-linux.tar.bz2 - e4dff744853dacbc471b3d3f8db47897497b8c8d pypy-2.2.1-linux64.tar.bz2 - 51acfd6dde38e2f50ef75946326cd75c10d69c4b pypy-2.2.1-src.tar.bz2 - 367944bc9e045747cc1eafb2623629a6a07e8797 pypy-2.2.1-src.zip - 6db267c4b28bb87fa95af4c5488db559d35b4797 pypy-2.2.1-win32.zip - caf13d377fcdced4bfadd4158ba3d18d520396f3 pypy-2.2.1-osx64.tar.bz2 + 66d8d9ca9cbfd624b64b1c890df4adb2f05073fe pypy-2.3-linux-armel.tar.bz2 + 42412faad62c0e959ede36d07f481f3fea0ea314 pypy-2.3-linux-armhf-raring.tar.bz2 + 03b135611b0c227920c9b163bad047ab76992893 pypy-2.3-linux-armhf-raspbian.tar.bz2 + dce02c64e2d442f5205cd94a8bb0c2021347ad41 pypy-2.3-linux.tar.bz2 + 5bf5e2b2d9a002a9d81830dfffe5fa68959a8f40 pypy-2.3-linux64.tar.bz2 + 8d29b5948510fd0d4fd9d4376e41c48440cade4c pypy-2.3-src.tar.bz2 + c900ea9c29e2e360b9b9c542dd17172cbc144f01 pypy-2.3-src.zip + fcbfa5cc10c8dbcbf75195a5812f5267e3b97431 pypy-2.3-win32.zip + b40c3c56138725757ef0f97ffb67dce89b7665f0 pypy-2.3-osx64.tar.bz2 6aa8377a09f79f1ce145537865d80716e40378de pypy3-2.1-beta1-linux64.tar.bz2 c948aa751500e20df0678695524c6fc5088da39c pypy3-2.1-beta1-linux-armel.tar.bz2 b316e04cd99abccfcfe7007df7ce78e56feb8889 pypy3-2.1-beta1-linux-armhf-raring.tar.bz2 diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -6,7 +6,7 @@ PyPy features =========================================================== -**PyPy 2.2** 
implements **Python 2.7.3** and runs on Intel +**PyPy 2.3** implements **Python 2.7.6** and runs on Intel `x86 (IA-32)`_ , `x86_64`_ and `ARM`_ platforms, with PPC being stalled. It supports all of the core language, passing the Python test suite (with minor modifications that were already accepted in the main python @@ -84,23 +84,12 @@ PyPy has many secondary features and semi-independent projects. We will mention here: -* **the .NET backend:** There was a backend for building a native pypy - for the .NET/CLI VM. Of particular interest was `the cli-jit - branch`_, in which you could make a version of ``pypy-net`` which also - contains a high-level JIT compiler (it compiled your Python programs - Just in Time into CLR bytecodes). The code of this backend is very - old by now and would require serious work to get back into a working - state. If you would like to tackle this project, please `Contact us`_! - -* **the Java backend:** PyPy can run on the Java VM, but more care is - needed to finish this project. Writing a backend for our high-level - JIT compiler would be excellent. `Contact us`_! - * **Other languages:** we also implemented other languages that makes use of our RPython toolchain: Prolog_ (almost complete), as well as Smalltalk_, JavaScript_, Io_, Scheme_ and Gameboy_. - There is also a Ruby implementation called Topaz_. + There is also a Ruby implementation called Topaz_ and a PHP implementation + called HippyVM_. .. _`the cli-jit branch`: https://bitbucket.org/pypy/pypy/src/cli-jit @@ -112,3 +101,4 @@ .. _Scheme: https://bitbucket.org/pypy/lang-scheme/ .. _Gameboy: https://bitbucket.org/pypy/lang-gameboy/ .. _Topaz: http://topazruby.com/ +.. _HippyVM: http://www.hippyvm.com/ diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -4,7 +4,7 @@ --- PyPy is a `fast`_, `compliant`_ alternative implementation of the `Python`_ -language (2.7.3 and 3.2.3). 
It has several advantages and distinct features: +language (2.7.6 and 3.2.3). It has several advantages and distinct features: * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ @@ -26,7 +26,7 @@ .. class:: download -`Download and try out the PyPy release 2.2.1 or the PyPy3 2.1 beta1!`__ +`Download and try out the PyPy release 2.3 or the PyPy3 2.1 beta1!`__ .. __: download.html From noreply at buildbot.pypy.org Fri May 9 08:26:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:15 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: change links for source archive Message-ID: <20140509062615.8F4C61C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r495:38d0f50e1742 Date: 2014-05-09 02:30 +0300 http://bitbucket.org/pypy/pypy.org/changeset/38d0f50e1742/ Log: change links for source archive diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -94,8 +94,8 @@ .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-osx64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-win32.zip .. _`VS 2008 runtime library installer vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 -.. __: https://bitbucket.org/pypy/pypy/get/release-2.3.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/get/release-2.3.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-src.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-src.zip .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/ @@ -229,8 +229,8 @@ * `pypy-2.3-src.tar.bz2`__ (sources, Unix line endings) * `pypy-2.3-src.zip`__ (sources, Unix line endings too, sorry) - .. __: https://bitbucket.org/pypy/pypy/get/release-2.3.tar.bz2 - .. __: https://bitbucket.org/pypy/pypy/get/release-2.3.zip + .. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-src.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.3-src.zip Or you can checkout the current trunk using Mercurial_ (the trunk usually works and is of course more up-to-date):: From noreply at buildbot.pypy.org Fri May 9 08:26:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:16 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update notable modules that work with PyPy Message-ID: <20140509062616.989A61C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r496:e6f69077f5ae Date: 2014-05-09 08:52 +0300 http://bitbucket.org/pypy/pypy.org/changeset/e6f69077f5ae/ Log: update notable modules that work with PyPy diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -50,6 +50,10 @@ * pyglet +* Pillow (the PIL fork) + +* `lxml`_ + Known differences that are not going to be fixed: * PyPy does not support refcounting semantics. The following code @@ -104,4 +108,4 @@ .. _`our dev site`: http://pypy.readthedocs.org/en/latest/cpython_differences.html .. _`more details here`: http://pypy.readthedocs.org/en/latest/cpython_differences.html#differences-related-to-garbage-collection-strategies .. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home - +.. _`lxml`: https://github.com/amauryfa/lxml/tree/cffi/ From noreply at buildbot.pypy.org Fri May 9 08:26:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:17 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: rebuild Message-ID: <20140509062617.A03ED1C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r497:b25075d997cf Date: 2014-05-09 08:52 +0300 http://bitbucket.org/pypy/pypy.org/changeset/b25075d997cf/ Log: rebuild diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -45,12 +45,13 @@

Python compatibility

-

PyPy implements the Python language version 2.7.3. It supports all of the core +

PyPy implements the Python language version 2.7.6. It supports all of the core language, passing Python test suite (with minor modifications that were already accepted in the main python in newer versions). It supports most of the commonly used Python standard library modules; details below.

PyPy has alpha/beta-level support for the CPython C API, however, as of -2.2 release this feature is not yet complete. Many libraries will require +2.3 release this feature is not yet complete. We strongly advise use of CFFI +instead. CFFI come builtin with PyPy. Many libraries will require a bit of effort to work, but there are known success stories. Check out PyPy blog for updates, as well as the Compatibility Wiki.

C extensions need to be recompiled for PyPy in order to work. Depending on @@ -77,6 +78,8 @@

  • pylons
  • divmod's nevow
  • pyglet
  • +
  • Pillow (the PIL fork)
  • +
  • lxml
  • Known differences that are not going to be fixed:

      diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -50,10 +50,10 @@ performance improvements. Note that the OS X nightly builds (but not the release) are slightly miscompiled due to buildslave being old. Contributions are welcomed.

      -

      Here are the binaries for the current release — PyPy 2.2.1 — -(what's new in PyPy 2.2.1? what's new in PyPy 2.2?) -for x86 and ARM Linux, Mac OS/X, Windows and the older release — PyPy3 2.1 beta1 — (what's new in -PyPy3 2.1 beta1?).

      +

      Here are the binaries for the current release — PyPy 2.3 — +(`what's new in PyPy 2.3`_ )

      +

      for x86 and ARM Linux, Mac OS/X, Windows and the older release — PyPy3 2.1 beta1 — +(what's new in PyPy3 2.1 beta1?).

      • Download
        • Default (with a JIT Compiler)
        • @@ -73,11 +73,11 @@ x86 CPUs that have the SSE2 instruction set (most of them do, nowadays), or on x86-64 CPUs. They also contain stackless extensions, like greenlets. -(This is the official release 2.2.1; +(This is the official release 2.3; for the most up-to-date version see below.)

    -

    2.2.1

    +

    2.3

    Note that Linux binaries are dynamically linked, as is usual, and thus might not be usable due to the sad story of linux binary compatibility. This means that Linux binaries are only usable on the distributions written next to @@ -89,16 +89,16 @@ degrees of being up-to-date. You may have more chances trying out Squeaky's portable Linux binaries.

    @@ -158,7 +158,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-2.2.1/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy-2.3/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -194,8 +194,8 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    @@ -264,15 +264,15 @@

    Checksums

    Here are the checksums for each of the downloads (md5 and sha1):

    -0127094c36f985f6c2b4ed82be3fb6c8  pypy-2.2.1-linux-armel.tar.bz2
    -278e0ceb961bd0f49af53a01301601d4  pypy-2.2.1-linux-armhf-raring.tar.bz2
    -dcb1803b109a43b3a5b166bb1f7c4736  pypy-2.2.1-linux-armhf-raspbian.tar.bz2
    -86ef92a13faf309f4f413e35c01afcab  pypy-2.2.1-linux.tar.bz2
    -96732d12957fa3c8e58ded2939631e5f  pypy-2.2.1-linux64.tar.bz2
    -7ec9f48702323f9e93654ba73dd46720  pypy-2.2.1-src.tar.bz2
    -dec735b2a0a0e9655a4a89c77ec56dcf  pypy-2.2.1-src.zip
    -213aacf7a06cd531c3457f1c5a73059c  pypy-2.2.1-win32.zip
    -040cd25e95f62242c41e9d4e1e1c1166  pypy-2.2.1-osx64.tar.bz2
    +391bda03d0642ebb3fa6d59ec90a1388  pypy-2.3-linux-armel.tar.bz2
    +cdb738317e958031d0752dff5a1742f1  pypy-2.3-linux-armhf-raring.tar.bz2
    +0ab5df88c02b41f8f062b2893ccd5066  pypy-2.3-linux-armhf-raspbian.tar.bz2
    +e84a1179a63632c62e311363a409df56  pypy-2.3-linux.tar.bz2
    +2ae65fa6b0ea9ddffd50a02a40f27186  pypy-2.3-linux64.tar.bz2
    +c93a8e47f3b3109af2f66d2bd766eb97  pypy-2.3-src.tar.bz2
    +a0253c8c072207c22f4bab97f8826966  pypy-2.3-src.zip
    +55e0598ca9add0e7c78ffa49db6476c2  pypy-2.3-win32.zip
    +76b5d7798d3a9b8919f792df5a402bb2  pypy-2.3-osx64.tar.bz2
     f6adca4d26f34bef9903cc5347c7d688  pypy3-2.1-beta1-linux64.tar.bz2
     d57d0d0d3c49c7cce75440924d8f66b7  pypy3-2.1-beta1-linux-armel.tar.bz2
     55b82b199ccf537c7ea5e2f31df78dfe  pypy3-2.1-beta1-linux-armhf-raring.tar.bz2
    @@ -284,15 +284,15 @@
     cba4bdcfaed94185b20637379cb236b9  pypy3-2.1-beta1-src.zip
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
    -5df9cb5348da032cc1b8190101266be41f969bb3  pypy-2.2.1-linux-armel.tar.bz2
    -f266f0f5a60db6fcdd4aa2c44aa20bf710736c94  pypy-2.2.1-linux-armhf-raring.tar.bz2
    -8cd77593762c6f4abaa91278590e927db7c08168  pypy-2.2.1-linux-armhf-raspbian.tar.bz2
    -287bca3f6b6bcc83453317f38777cb825b964902  pypy-2.2.1-linux.tar.bz2
    -e4dff744853dacbc471b3d3f8db47897497b8c8d  pypy-2.2.1-linux64.tar.bz2
    -51acfd6dde38e2f50ef75946326cd75c10d69c4b  pypy-2.2.1-src.tar.bz2
    -367944bc9e045747cc1eafb2623629a6a07e8797  pypy-2.2.1-src.zip
    -6db267c4b28bb87fa95af4c5488db559d35b4797  pypy-2.2.1-win32.zip
    -caf13d377fcdced4bfadd4158ba3d18d520396f3  pypy-2.2.1-osx64.tar.bz2
    +66d8d9ca9cbfd624b64b1c890df4adb2f05073fe  pypy-2.3-linux-armel.tar.bz2
    +42412faad62c0e959ede36d07f481f3fea0ea314  pypy-2.3-linux-armhf-raring.tar.bz2
    +03b135611b0c227920c9b163bad047ab76992893  pypy-2.3-linux-armhf-raspbian.tar.bz2
    +dce02c64e2d442f5205cd94a8bb0c2021347ad41  pypy-2.3-linux.tar.bz2
    +5bf5e2b2d9a002a9d81830dfffe5fa68959a8f40  pypy-2.3-linux64.tar.bz2
    +8d29b5948510fd0d4fd9d4376e41c48440cade4c  pypy-2.3-src.tar.bz2
    +c900ea9c29e2e360b9b9c542dd17172cbc144f01  pypy-2.3-src.zip
    +fcbfa5cc10c8dbcbf75195a5812f5267e3b97431  pypy-2.3-win32.zip
    +b40c3c56138725757ef0f97ffb67dce89b7665f0  pypy-2.3-osx64.tar.bz2
     6aa8377a09f79f1ce145537865d80716e40378de  pypy3-2.1-beta1-linux64.tar.bz2
     c948aa751500e20df0678695524c6fc5088da39c  pypy3-2.1-beta1-linux-armel.tar.bz2
     b316e04cd99abccfcfe7007df7ce78e56feb8889  pypy3-2.1-beta1-linux-armhf-raring.tar.bz2
    @@ -306,6 +306,12 @@
     be94460bed8b2682880495435c309b6611ae2c31  pypy-1.8-sandbox-linux.tar.bz2
     
  • +
    +

    Docutils System Messages

    +
    +

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    +Unknown target name: “what's new in pypy 2.3”.
    +
    diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -45,7 +45,7 @@

    Features

    -

    PyPy 2.2 implements Python 2.7.3 and runs on Intel +

    PyPy 2.3 implements Python 2.7.6 and runs on Intel x86 (IA-32) , x86_64 and ARM platforms, with PPC being stalled. It supports all of the core language, passing the Python test suite (with minor modifications that were already accepted in the main python @@ -95,22 +95,11 @@

    PyPy has many secondary features and semi-independent projects. We will mention here:

      -
    • the .NET backend: There was a backend for building a native pypy -for the .NET/CLI VM. Of particular interest was the cli-jit -branch, in which you could make a version of pypy-net which also -contains a high-level JIT compiler (it compiled your Python programs -Just in Time into CLR bytecodes). The code of this backend is very -old by now and would require serious work to get back into a working -state. If you would like to tackle this project, please Contact us!

      -
    • -
    • the Java backend: PyPy can run on the Java VM, but more care is -needed to finish this project. Writing a backend for our high-level -JIT compiler would be excellent. Contact us!

      -
    • Other languages: we also implemented other languages that makes use of our RPython toolchain: Prolog (almost complete), as well as Smalltalk, JavaScript, Io, Scheme and Gameboy.

      -

      There is also a Ruby implementation called Topaz.

      +

      There is also a Ruby implementation called Topaz and a PHP implementation +called HippyVM.

    diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -46,7 +46,7 @@

    Welcome to PyPy

    PyPy is a fast, compliant alternative implementation of the Python -language (2.7.3 and 3.2.3). It has several advantages and distinct features:

    +language (2.7.6 and 3.2.3). It has several advantages and distinct features:

    • Speed: thanks to its Just-in-Time compiler, Python programs @@ -63,7 +63,7 @@
    • As well as other features.
    -

    Download and try out the PyPy release 2.2.1 or the PyPy3 2.1 beta1!

    +

    Download and try out the PyPy release 2.3 or the PyPy3 2.1 beta1!

    Want to know more? A good place to start is our detailed speed and compatibility reports!

    From noreply at buildbot.pypy.org Fri May 9 08:26:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:18 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: typo Message-ID: <20140509062618.9A9471C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r498:a1566bb0df0c Date: 2014-05-09 09:00 +0300 http://bitbucket.org/pypy/pypy.org/changeset/a1566bb0df0c/ Log: typo diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -51,7 +51,7 @@ (but not the release) are slightly miscompiled due to buildslave being old. Contributions are welcomed
    .

    Here are the binaries for the current release — PyPy 2.3 — -(`what's new in PyPy 2.3`_ )

    +(what's new in PyPy 2.3?)

    for x86 and ARM Linux, Mac OS/X, Windows and the older release — PyPy3 2.1 beta1 — (what's new in PyPy3 2.1 beta1?).

      @@ -306,12 +306,6 @@ be94460bed8b2682880495435c309b6611ae2c31 pypy-1.8-sandbox-linux.tar.bz2
    -
    -

    Docutils System Messages

    -
    -

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    -Unknown target name: “what's new in pypy 2.3”.
    -
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -15,7 +15,7 @@ miscompiled due to buildslave being old. Contributions are welcomed**. Here are the binaries for the current release — **PyPy 2.3** — -(`what's new in PyPy 2.3`_ ) +(`what's new in PyPy 2.3?`_ ) for x86 and ARM Linux, Mac OS/X, Windows and the older release — **PyPy3 2.1 beta1** — (`what's new in PyPy3 2.1 beta1?`_). @@ -61,7 +61,7 @@ source or downloading your PyPy from your release vendor. `Ubuntu`_ (`PPA`_), `Debian`_, `Homebrew`_, MacPorts, `Fedora`_, `Gentoo`_ and `Arch`_ are known to package PyPy, with various -degrees of being up-to-date. You may have more chances trying out Squeaky's +degrees of being up-to-date. You may have more luck trying out Squeaky's `portable Linux binaries`_. .. _`Ubuntu`: http://packages.ubuntu.com/raring/pypy From noreply at buildbot.pypy.org Fri May 9 08:26:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:19 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: add a caveat Message-ID: <20140509062619.914121C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r499:b81128f8345c Date: 2014-05-09 09:03 +0300 http://bitbucket.org/pypy/pypy.org/changeset/b81128f8345c/ Log: add a caveat diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -217,6 +217,8 @@ cd numpy pypy setup.py install +Note that NumPy support is still a work-in-progress, many things do not +work and those that do may not be any faster than NumPy on CPython. .. 
_translate: From noreply at buildbot.pypy.org Fri May 9 08:26:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:20 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: the sad truth Message-ID: <20140509062620.81E9F1C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r500:759d2187da46 Date: 2014-05-09 09:18 +0300 http://bitbucket.org/pypy/pypy.org/changeset/759d2187da46/ Log: the sad truth diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -86,7 +86,7 @@ source or downloading your PyPy from your release vendor. Ubuntu (PPA), Debian, Homebrew, MacPorts, Fedora, Gentoo and Arch are known to package PyPy, with various -degrees of being up-to-date. You may have more chances trying out Squeaky's +degrees of being up-to-date. You may have more luck trying out Squeaky's portable Linux binaries.

    • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
    • @@ -187,6 +187,8 @@ cd numpy pypy setup.py install +

      Note that NumPy support is still a work in progress; many things do not +work, and those that do may not be any faster than NumPy on CPython.

      Building from source

      @@ -218,7 +220,7 @@ pypy ../../rpython/bin/rpython -O2 --sandbox targetpypystandalone # get the sandbox version -
    • Enjoy Mandelbrot :-) It takes on the order of half an hour to +

    • Enjoy Mandelbrot :-) It takes on the order of an hour to finish the translation, and 2.x GB of RAM on a 32-bit system and 4.x GB on 64-bit systems. (Do not start a translation on a machine with insufficient RAM! It will just swap forever. See diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -254,7 +254,7 @@ pypy ../../rpython/bin/rpython -O2 targetpypystandalone # get the no-jit version pypy ../../rpython/bin/rpython -O2 --sandbox targetpypystandalone # get the sandbox version -5. Enjoy Mandelbrot ``:-)`` It takes on the order of half an hour to +5. Enjoy Mandelbrot ``:-)`` It takes on the order of an hour to finish the translation, and 2.x GB of RAM on a 32-bit system and 4.x GB on 64-bit systems. (Do not start a translation on a machine with insufficient RAM! It will just swap forever. See From noreply at buildbot.pypy.org Fri May 9 08:26:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 08:26:21 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: make checksum list on download page fit a bit better Message-ID: <20140509062621.752311C0685@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r501:a5408dbc649c Date: 2014-05-09 09:21 +0300 http://bitbucket.org/pypy/pypy.org/changeset/a5408dbc649c/ Log: make checksum list on download page fit a bit better diff --git a/css/site.css b/css/site.css --- a/css/site.css +++ b/css/site.css @@ -167,7 +167,7 @@ #body-inner { margin: 0 auto; padding: 10px 20px; - width: 950px; + width: 1000px; } #body-outer { @@ -223,7 +223,7 @@ #main { float: left; padding: 10px 30px 0 10px; - width: 630px; + width: 680px; line-height: 2em; font-size: 0.9em; } @@ -1375,4 +1375,4 @@ position: absolute; text-align: center; left: 400px; -} \ No newline at end of file +} From noreply at buildbot.pypy.org Fri May 9 09:08:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 09:08:16 +0200 (CEST) Subject: 
[pypy-commit] pypy default: update list of contributors Message-ID: <20140509070816.D76291D2BF9@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71428:2f31e072b56e Date: 2014-05-09 10:07 +0300 http://bitbucket.org/pypy/pypy/changeset/2f31e072b56e/ Log: update list of contributors diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,61 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +146,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +168,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +192,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -205,6 +214,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +228,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -235,27 +246,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +282,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz From noreply at buildbot.pypy.org Fri May 9 09:08:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 May 2014 09:08:18 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: update list of contributors Message-ID: <20140509070818.1065C1D2BF9@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71429:23822194f497 Date: 2014-05-09 10:07 +0300 http://bitbucket.org/pypy/pypy/changeset/23822194f497/ Log: update list of contributors diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann 
Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,61 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +146,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +168,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +192,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -205,6 +214,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +228,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -235,27 +246,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +282,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz From noreply at buildbot.pypy.org Fri May 9 16:15:47 2014 From: noreply at buildbot.pypy.org (ISF) Date: Fri, 9 May 2014 16:15:47 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Define JITFRAME_FIXED_SIZE Message-ID: <20140509141547.02C9A1D2BB8@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71430:e20b769ecc66 Date: 2014-05-09 14:12 +0000 http://bitbucket.org/pypy/pypy/changeset/e20b769ecc66/ Log: Define JITFRAME_FIXED_SIZE diff --git a/rpython/jit/backend/ppc/arch.py b/rpython/jit/backend/ppc/arch.py --- a/rpython/jit/backend/ppc/arch.py +++ b/rpython/jit/backend/ppc/arch.py @@ -33,6 +33,16 @@ FORCE_INDEX_OFS = (len(MANAGED_REGS) + len(MANAGED_FP_REGS)) * WORD +# The JITFRAME_FIXED_SIZE is measured in words, not bytes or bits. 
+# Follwing the PPC ABI, we are saving: +# - volatile fpr's +# - volatile gpr's +# - vrsave word +# - alignment padding +# - vector register save area (quadword aligned) +# 3 + 27 + 1 + 4 + 1 +JITFRAME_FIXED_SIZE = len(MANAGED_FP_REGS) + len(MANAGED_REGS) + 1 + 4 + 1 + # offset to LR in BACKCHAIN if IS_PPC_32: LR_BC_OFFSET = WORD diff --git a/rpython/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py --- a/rpython/jit/backend/ppc/locations.py +++ b/rpython/jit/backend/ppc/locations.py @@ -1,6 +1,8 @@ from rpython.jit.metainterp.history import INT, FLOAT import sys +# TODO: solve the circular import: runner -> arch -> register -> locations -> +# arch # XXX import from arch.py, currently we have a circular import if sys.maxint == (2**31 - 1): WORD = 4 @@ -10,6 +12,9 @@ FWORD = 8 DWORD = 2 * WORD +# JITFRAME_FIXED_SIZE is also duplicated because of the circular import +JITFRAME_FIXED_SIZE = 27 + 31 + 1 + 4 + 1 + class AssemblerLocation(object): _immutable_ = True type = INT From noreply at buildbot.pypy.org Fri May 9 16:15:48 2014 From: noreply at buildbot.pypy.org (ISF) Date: Fri, 9 May 2014 16:15:48 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Update StackLocation Message-ID: <20140509141548.2226C1D2BB8@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71431:dbe810ba1de4 Date: 2014-05-09 14:13 +0000 http://bitbucket.org/pypy/pypy/changeset/dbe810ba1de4/ Log: Update StackLocation diff --git a/rpython/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py --- a/rpython/jit/backend/ppc/locations.py +++ b/rpython/jit/backend/ppc/locations.py @@ -112,21 +112,24 @@ class StackLocation(AssemblerLocation): _immutable_ = True - def __init__(self, position, num_words=1, type=INT): + def __init__(self, position, fp_offset, type=INT): if type == FLOAT: self.width = FWORD else: self.width = WORD self.position = position + self.value = fp_offset self.type = type - self.value = 
get_spp_offset(position) def __repr__(self): - return 'SPP(%s)+%d' % (self.type, self.value) + return 'FP(%s)+%d' % (self.type, self.value) def location_code(self): return 'b' + def get_position(self): + return self.position + def assembler(self): return repr(self) @@ -134,13 +137,16 @@ return True def as_key(self): - return -self.position + 10000 + return self.position + 10000 def imm(val): return ImmLocation(val) def get_spp_offset(pos): if pos < 0: - return -pos * WORD + return pos * WORD else: - return -(pos + 1) * WORD + return (pos + 1) * WORD + +def get_fp_offset(base_ofs, position): + return base_ofs + position From noreply at buildbot.pypy.org Fri May 9 16:15:49 2014 From: noreply at buildbot.pypy.org (ISF) Date: Fri, 9 May 2014 16:15:49 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Fix regalloc initialization and usage Message-ID: <20140509141549.50F2E1D2BB8@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71432:ac487b65dce6 Date: 2014-05-09 14:14 +0000 http://bitbucket.org/pypy/pypy/changeset/ac487b65dce6/ Log: Fix regalloc initialization and usage diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -812,9 +812,9 @@ 'e', looptoken.number) self.startpos = self.mc.currpos() - regalloc = Regalloc(assembler=self, frame_manager=PPCFrameManager()) + regalloc = Regalloc(assembler=self) - regalloc.prepare_loop(inputargs, operations) + regalloc.prepare_loop(inputargs, operations, looptoken) start_pos = self.mc.currpos() looptoken._ppc_loop_code = start_pos diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1,10 +1,11 @@ from rpython.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager, - TempBox, compute_vars_longevity) + 
TempBox, compute_vars_longevity, + BaseRegalloc) from rpython.jit.backend.ppc.arch import (WORD, MY_COPY_OF_REGS, IS_PPC_32) from rpython.jit.codewriter import longlong from rpython.jit.backend.ppc.jump import (remap_frame_layout, remap_frame_layout_mixed) -from rpython.jit.backend.ppc.locations import imm +from rpython.jit.backend.ppc.locations import imm, get_fp_offset, get_spp_offset from rpython.jit.backend.ppc.helper.regalloc import (_check_imm_arg, prepare_cmp_op, prepare_unary_int_op, @@ -174,14 +175,14 @@ return reg class PPCFrameManager(FrameManager): - def __init__(self): + def __init__(self, base_ofs): FrameManager.__init__(self) self.used = [] + self.base_ofs = base_ofs - @staticmethod - def frame_pos(loc, type): - num_words = PPCFrameManager.frame_size(type) - return locations.StackLocation(loc, num_words=num_words, type=type) + def frame_pos(self, loc, box_type): + #return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type) + return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type) @staticmethod def frame_size(type): @@ -192,18 +193,19 @@ assert loc.is_stack() return loc.position -class Regalloc(object): +class Regalloc(BaseRegalloc): - def __init__(self, frame_manager=None, assembler=None): + def __init__(self, assembler=None): self.cpu = assembler.cpu - self.frame_manager = frame_manager + self.frame_manager = PPCFrameManager(self.cpu.get_baseofs_of_frame_field()) self.assembler = assembler self.jump_target_descr = None self.final_jump_op = None def _prepare(self, inputargs, operations): - longevity, last_real_usage = compute_vars_longevity( - inputargs, operations) + self.fm = self.frame_manager + longevity, last_real_usage = compute_vars_longevity(inputargs, + operations) self.longevity = longevity self.last_real_usage = last_real_usage fm = self.frame_manager @@ -211,9 +213,9 @@ self.fprm = FPRegisterManager(longevity, fm, asm) self.rm = PPCRegisterManager(longevity, fm, asm) - def prepare_loop(self, 
inputargs, operations): + def prepare_loop(self, inputargs, operations, looptoken): self._prepare(inputargs, operations) - self._set_initial_bindings(inputargs) + self._set_initial_bindings(inputargs, looptoken) self.possibly_free_vars(inputargs) def prepare_bridge(self, inputargs, arglocs, ops): From noreply at buildbot.pypy.org Fri May 9 19:43:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 May 2014 19:43:09 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/31f9797a356c Message-ID: <20140509174309.436481D29FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71433:1dde67e0368a Date: 2014-05-08 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/1dde67e0368a/ Log: import stmgc/31f9797a356c diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -fb2bc9a3419a +31f9797a356c diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -210,6 +210,7 @@ }; static char *stm_object_pages; +static int stm_object_pages_fd; static stm_thread_local_t *stm_all_thread_locals = NULL; static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -9,14 +9,10 @@ static char *fork_big_copy = NULL; +static int fork_big_copy_fd; static stm_thread_local_t *fork_this_tl; static bool fork_was_in_transaction; -static char *setup_mmap(char *reason); /* forward, in setup.c */ -static void setup_protection_settings(void); /* forward, in setup.c */ -static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ - - static 
bool page_is_null(char *p) { long *q = (long *)p; @@ -75,7 +71,8 @@ /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages */ - char *big_copy = setup_mmap("stmgc's fork support"); + int big_copy_fd; + char *big_copy = setup_mmap("stmgc's fork support", &big_copy_fd); /* Copy each of the segment infos into the new mmap, nurseries, and associated read markers @@ -140,6 +137,7 @@ assert(fork_big_copy == NULL); fork_big_copy = big_copy; + fork_big_copy_fd = big_copy_fd; fork_this_tl = this_tl; fork_was_in_transaction = was_in_transaction; @@ -164,6 +162,7 @@ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); fork_big_copy = NULL; + close_fd_mmap(fork_big_copy_fd); bool was_in_transaction = fork_was_in_transaction; s_mutex_unlock(); @@ -215,6 +214,8 @@ if (res != stm_object_pages) stm_fatalerror("after fork: mremap failed: %m"); fork_big_copy = NULL; + close_fd_mmap(stm_object_pages_fd); + stm_object_pages_fd = fork_big_copy_fd; /* Unregister all other stm_thread_local_t, mostly as a way to free the memory used by the shadowstacks diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c --- a/rpython/translator/stm/src_stm/stm/pages.c +++ b/rpython/translator/stm/src_stm/stm/pages.c @@ -82,9 +82,18 @@ can only be remapped to page N in another segment */ assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); +#ifdef USE_REMAP_FILE_PAGES int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) stm_fatalerror("remap_file_pages: %m"); +#else + char *res = mmap(addr, size, + PROT_READ | PROT_WRITE, + (MAP_PAGES_FLAGS & ~MAP_ANONYMOUS) | MAP_FIXED, + stm_object_pages_fd, pgoff * 4096UL); + if (UNLIKELY(res != addr)) + stm_fatalerror("mmap (remapping page): %m"); +#endif } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) @@ -170,6 +179,7 @@ static void pages_setup_readmarkers_for_nursery(void) { +#ifdef 
USE_REMAP_FILE_PAGES /* The nursery page's read markers are never read, but must still be writeable. We'd like to map the pages to a general "trash page"; missing one, we remap all the pages over to the same one. @@ -188,4 +198,5 @@ /* errors here ignored */ } } +#endif } diff --git a/rpython/translator/stm/src_stm/stm/pages.h b/rpython/translator/stm/src_stm/stm/pages.h --- a/rpython/translator/stm/src_stm/stm/pages.h +++ b/rpython/translator/stm/src_stm/stm/pages.h @@ -20,6 +20,8 @@ #define PAGE_FLAG_START END_NURSERY_PAGE #define PAGE_FLAG_END NB_PAGES +#define USE_REMAP_FILE_PAGES + struct page_shared_s { #if NB_SEGMENTS <= 8 uint8_t by_segment; diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -4,7 +4,8 @@ #endif -static char *setup_mmap(char *reason) +#ifdef USE_REMAP_FILE_PAGES +static char *setup_mmap(char *reason, int *ignored) { char *result = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, @@ -14,6 +15,45 @@ return result; } +static void close_fd_mmap(int ignored) +{ +} +#else +#include /* For O_* constants */ +static char *setup_mmap(char *reason, int *map_fd) +{ + char name[128]; + sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", + (long)getpid(), get_stm_time()); + + /* Create the big shared memory object, and immediately unlink it. + There is a small window where if this process is killed the + object is left around. It doesn't seem possible to do anything + about it... 
+ */ + int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); + shm_unlink(name); + + if (fd == -1) { + stm_fatalerror("%s failed (stm_open): %m", reason); + } + if (ftruncate(fd, TOTAL_MEMORY) != 0) { + stm_fatalerror("%s failed (ftruncate): %m", reason); + } + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS & ~MAP_ANONYMOUS, fd, 0); + if (result == MAP_FAILED) { + stm_fatalerror("%s failed (mmap): %m", reason); + } + *map_fd = fd; + return result; +} +static void close_fd_mmap(int map_fd) +{ + close(map_fd); +} +#endif static void setup_protection_settings(void) { @@ -57,7 +97,8 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()", + &stm_object_pages_fd); setup_protection_settings(); long i; @@ -87,15 +128,16 @@ pr->callbacks_on_abort = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; + pr->pub.transaction_read_version = 0xff; } /* The pages are shared lazily, as remap_file_pages() takes a relatively long time for each page. - The read markers are initially zero, which is correct: - STM_SEGMENT->transaction_read_version never contains zero, - so a null read marker means "not read" whatever the - current transaction_read_version is. + The read markers are initially zero, but we set anyway + transaction_read_version to 0xff in order to force the first + transaction to "clear" the read markers by mapping a different, + private range of addresses. 
*/ setup_sync(); @@ -127,6 +169,7 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; + close_fd_mmap(stm_object_pages_fd); teardown_core(); teardown_sync(); diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -8,6 +8,7 @@ #include "stm/pages.h" #include "stm/gcpage.h" #include "stm/sync.h" +#include "stm/setup.h" #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" From noreply at buildbot.pypy.org Fri May 9 19:43:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 May 2014 19:43:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add missing file Message-ID: <20140509174310.8A2231D29FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71434:ed7037dd6844 Date: 2014-05-08 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ed7037dd6844/ Log: Add missing file diff --git a/rpython/translator/stm/src_stm/stm/setup.h b/rpython/translator/stm/src_stm/stm/setup.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/setup.h @@ -0,0 +1,6 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ + +static char *setup_mmap(char *reason, int *map_fd); +static void close_fd_mmap(int map_fd); +static void setup_protection_settings(void); +static pthread_t *_get_cpth(stm_thread_local_t *); From noreply at buildbot.pypy.org Fri May 9 19:43:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 May 2014 19:43:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Like CPython, the built-in module should be "_struct", imported Message-ID: <20140509174311.A73C41D29FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71435:d53eba2e73ec Date: 2014-05-09 19:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d53eba2e73ec/ Log: Like CPython, the built-in module should be "_struct", imported by the file 
"lib-python/2.7/struct.py". Fixes a very obscure ImportError by running "-m idlelib.idle". diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -45,6 +45,8 @@ The variable struct.error is an exception raised on errors.""" + applevel_name = "_struct" + interpleveldefs = { 'error': 'interp_struct.get_error(space)', @@ -55,6 +57,7 @@ 'unpack_from': 'interp_struct.unpack_from', 'Struct': 'interp_struct.W_Struct', + '_clearcache': 'interp_struct.clearcache', } appleveldefs = { diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -142,3 +142,6 @@ pack_into=interp2app(W_Struct.descr_pack_into), unpack_from=interp2app(W_Struct.descr_unpack_from), ) + +def clearcache(space): + """No-op on PyPy""" From noreply at buildbot.pypy.org Fri May 9 19:50:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 May 2014 19:50:11 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Like CPython, the built-in module should be "_struct", imported Message-ID: <20140509175011.9976B1D29FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71436:1c713161e6ab Date: 2014-05-09 19:42 +0200 http://bitbucket.org/pypy/pypy/changeset/1c713161e6ab/ Log: Like CPython, the built-in module should be "_struct", imported by the file "lib-python/2.7/struct.py". Fixes a very obscure ImportError by running "-m idlelib.idle". 
diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -45,6 +45,8 @@ The variable struct.error is an exception raised on errors.""" + applevel_name = "_struct" + interpleveldefs = { 'error': 'interp_struct.get_error(space)', @@ -55,6 +57,7 @@ 'unpack_from': 'interp_struct.unpack_from', 'Struct': 'interp_struct.W_Struct', + '_clearcache': 'interp_struct.clearcache', } appleveldefs = { diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -138,3 +138,6 @@ pack_into=interp2app(W_Struct.descr_pack_into), unpack_from=interp2app(W_Struct.descr_unpack_from), ) + +def clearcache(space): + """No-op on PyPy""" From noreply at buildbot.pypy.org Fri May 9 22:21:45 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 9 May 2014 22:21:45 +0200 (CEST) Subject: [pypy-commit] pypy default: link fix Message-ID: <20140509202145.3006D1D2BC2@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71437:269e37a5f339 Date: 2014-05-09 13:21 -0700 http://bitbucket.org/pypy/pypy/changeset/269e37a5f339/ Log: link fix diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -93,7 +93,7 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com -.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html +.. 
_`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.0.html New Platforms and Features diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -624,6 +624,7 @@ def get_raw_address(self): return self.array._charbuf_start() + def make_array(mytype): W_ArrayBase = globals()['W_ArrayBase'] From noreply at buildbot.pypy.org Sat May 10 16:03:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 May 2014 16:03:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Update: mention the varying slow-down factor Message-ID: <20140510140349.B4DB81D2D37@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71438:05559fd0dfb2 Date: 2014-05-10 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/05559fd0dfb2/ Log: Update: mention the varying slow-down factor diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -30,7 +30,8 @@ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ listed below, it should be in theory within 20%-50% slower than a -regular PyPy, comparing the JIT version in both cases. It is called +regular PyPy, comparing the JIT version in both cases (but see below!). +It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -90,6 +91,11 @@ * So far, small examples work fine, but there are still a few bugs. We're busy fixing them as we find them; feel free to `report bugs`_. +* It runs with an overhead as low as 20% on examples like "richards". + There are also other examples with higher overheads --up to 10x for + "translate.py"-- which we are still trying to understand. One suspect + is our partial GC implementation, see below. + * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause segfaults. 
From noreply at buildbot.pypy.org Sat May 10 17:14:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 May 2014 17:14:34 +0200 (CEST) Subject: [pypy-commit] pypy default: test and fix Message-ID: <20140510151434.698FA1D29FD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71439:6a78aac458a8 Date: 2014-05-10 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6a78aac458a8/ Log: test and fix diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -288,7 +288,9 @@ _about_ = make_sure_not_resized def compute_result_annotation(self, s_arg): - from rpython.annotator.model import SomeList + from rpython.annotator.model import SomeList, s_None + if s_None.contains(s_arg): + return s_arg # only None: just return assert isinstance(s_arg, SomeList) # the logic behind it is that we try not to propagate # make_sure_not_resized, when list comprehension is not on diff --git a/rpython/rlib/test/test_debug.py b/rpython/rlib/test/test_debug.py --- a/rpython/rlib/test/test_debug.py +++ b/rpython/rlib/test/test_debug.py @@ -53,6 +53,15 @@ py.test.raises(ListChangeUnallowed, interpret, f, [], list_comprehension_operations=True) +def test_make_sure_not_resized_annorder(): + def f(n): + if n > 5: + result = None + else: + result = [1,2,3] + make_sure_not_resized(result) + interpret(f, [10]) + def test_mark_dict_non_null(): def f(): d = {"ac": "bx"} From noreply at buildbot.pypy.org Sat May 10 22:29:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 10 May 2014 22:29:07 +0200 (CEST) Subject: [pypy-commit] pypy default: test, fix to only output IRC topic if nonrelease or if PYPY_IRC_TOPIC set Message-ID: <20140510202907.5BA141D236E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71440:adbfbb5d604d Date: 2014-05-10 23:16 +0300 http://bitbucket.org/pypy/pypy/changeset/adbfbb5d604d/ Log: test, fix to only output IRC topic if nonrelease or if PYPY_IRC_TOPIC set diff --git 
a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -671,7 +671,8 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - irc_topic = readenv and os.getenv('PYPY_IRC_TOPIC') + irc_topic = sys.version_info[3] != 'final' or ( + readenv and os.getenv('PYPY_IRC_TOPIC')) success = run_toplevel(interactive_console, mainmodule, quiet=not irc_topic) except SystemExit, e: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,10 +7,9 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir -from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = PYPY_VERSION[3] == "final" +is_release = sys.version_info[3] == "final" banner = sys.version.splitlines()[0] @@ -269,6 +268,11 @@ child.sendline("'' in sys.path") child.expect("True") + def test_irc_topic(self, monkeypatch): + monkeypatch.setenv('PYPY_IRC_TOPIC', '1') + child = self.spawn([]) + child.expect(irc_header) # banner + def test_help(self): # test that -h prints the usage, including the name of the executable # which should be /full/path/to/app_main.py in this case From noreply at buildbot.pypy.org Sat May 10 22:38:58 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 10 May 2014 22:38:58 +0200 (CEST) Subject: [pypy-commit] pypy default: fix docs Message-ID: <20140510203858.ACDDE1D2BB8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71441:5286728a9f98 Date: 2014-05-10 23:38 +0300 http://bitbucket.org/pypy/pypy/changeset/5286728a9f98/ Log: fix docs diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,10 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of 
contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py From noreply at buildbot.pypy.org Sun May 11 00:27:27 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:27 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Skip a test in test_re Message-ID: <20140510222727.A0FB61C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71442:22619b806fa7 Date: 2014-05-01 09:57 +0200 http://bitbucket.org/pypy/pypy/changeset/22619b806fa7/ Log: Skip a test in test_re diff --git a/lib-python/3/test/test_re.py b/lib-python/3/test/test_re.py --- a/lib-python/3/test/test_re.py +++ b/lib-python/3/test/test_re.py @@ -1,5 +1,5 @@ from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \ - cpython_only, captured_stdout + impl_detail, captured_stdout import io import re from re import Scanner @@ -19,6 +19,7 @@ class ReTests(unittest.TestCase): + @impl_detail("pypy buffers can be resized", pypy=False) def test_keep_buffer(self): # See bug 14212 b = bytearray(b'x') @@ -1018,7 +1019,6 @@ self.assertRaises(OverflowError, re.compile, r".{%d,}?" 
% 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128)) - @cpython_only def test_repeat_minmax_overflow_maxrepeat(self): try: from _sre import MAXREPEAT From noreply at buildbot.pypy.org Sun May 11 00:27:28 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:28 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Finish sys.implementation Message-ID: <20140510222728.F1AF71C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r71443:c7f33aefe221 Date: 2014-05-01 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c7f33aefe221/ Log: Finish sys.implementation diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -133,5 +133,7 @@ implementation = SimpleNamespace( name='pypy', + version=sys.version_info, + hexversion=sys.hexversion, cache_tag=_imp.get_tag(), ) diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -495,6 +495,24 @@ import sys assert sys.implementation.name == 'pypy' + # This test applies to all implementations equally. + levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF} + + assert sys.implementation.version + assert sys.implementation.hexversion + assert sys.implementation.cache_tag + + version = sys.implementation.version + assert version[:2] == (version.major, version.minor) + + hexversion = (version.major << 24 | version.minor << 16 | + version.micro << 8 | levels[version.releaselevel] << 4 | + version.serial << 0) + assert sys.implementation.hexversion == hexversion + + # PEP 421 requires that .name be lower case. 
+ assert sys.implementation.name == sys.implementation.name.lower() + def test_settrace(self): import sys counts = [] From noreply at buildbot.pypy.org Sun May 11 00:27:30 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:30 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: A branch to use the "fast" libmpdec for the decimal module. Message-ID: <20140510222730.5E6051C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71444:c4c346892732 Date: 2014-05-01 11:23 +0200 http://bitbucket.org/pypy/pypy/changeset/c4c346892732/ Log: A branch to use the "fast" libmpdec for the decimal module. From noreply at buildbot.pypy.org Sun May 11 00:27:31 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:31 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add libmpdec source code, and start a _decimal module. Message-ID: <20140510222731.DE7E01C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71445:ffe162a71565 Date: 2014-05-01 21:01 +0200 http://bitbucket.org/pypy/pypy/changeset/ffe162a71565/ Log: Add libmpdec source code, and start a _decimal module. 
diff too long, truncating to 2000 out of 16459 lines diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/__init__.py @@ -0,0 +1,9 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + appleveldefs = { + } + + interpleveldefs = { + 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', + } diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/interp_decimal.py @@ -0,0 +1,3 @@ +from rpython.rlib import rmpdec + +IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/test/test_module.py @@ -0,0 +1,6 @@ +class AppTestDecimalModule: + spaceconfig = dict(usemodules=('_decimal',)) + + def test_constants(self): + import _decimal + assert _decimal.IEEE_CONTEXT_MAX_BITS > 3 diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rmpdec.py @@ -0,0 +1,43 @@ +import py +import sys + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform as platform +from rpython.conftest import cdir + +libdir = py.path.local(cdir).join('src', 'libmpdec') + +compile_extra = [] +if sys.maxsize > 1<<32: + compile_extra.append("-DCONFIG_64") +else: + compile_extra.append("-DCONFIG_32") + +eci = ExternalCompilationInfo( + includes=['src/libmpdec/mpdecimal.h'], + include_dirs=[cdir], + separate_module_files=[libdir.join('mpdecimal.c'), + libdir.join('basearith.c'), + libdir.join('convolute.c'), + libdir.join('constants.c'), + libdir.join('context.c'), + libdir.join('fourstep.c'), + libdir.join('sixstep.c'), + libdir.join('transpose.c'), + libdir.join('difradix2.c'), + 
libdir.join('numbertheory.c'), + libdir.join('fnt.c'), + libdir.join('crt.c'), + libdir.join('memory.c'), + ], + compile_extra=compile_extra, + libraries=['m'], + ) + +class CConfig: + _compilation_info_ = eci + + MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( + 'MPD_IEEE_CONTEXT_MAX_BITS') + +globals().update(platform.configure(CConfig)) diff --git a/rpython/rlib/test/test_rmpdec.py b/rpython/rlib/test/test_rmpdec.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rmpdec.py @@ -0,0 +1,1 @@ +from rpython.rlib import rmpdec diff --git a/rpython/translator/c/src/libmpdec/README-pypy.txt b/rpython/translator/c/src/libmpdec/README-pypy.txt new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/README-pypy.txt @@ -0,0 +1,3 @@ +This libmpdec directory was directly copied from CPython. + +pyconfig.h was added, with a default configuration which works on Linux. diff --git a/rpython/translator/c/src/libmpdec/README.txt b/rpython/translator/c/src/libmpdec/README.txt new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/README.txt @@ -0,0 +1,90 @@ + + +libmpdec +======== + +libmpdec is a fast C/C++ library for correctly-rounded arbitrary precision +decimal floating point arithmetic. It is a complete implementation of +Mike Cowlishaw/IBM's General Decimal Arithmetic Specification. + + +Files required for the Python _decimal module +============================================= + + Core files for small and medium precision arithmetic + ---------------------------------------------------- + + basearith.{c,h} -> Core arithmetic in base 10**9 or 10**19. + bits.h -> Portable detection of least/most significant one-bit. + constants.{c,h} -> Constants that are used in multiple files. + context.c -> Context functions. + io.{c,h} -> Conversions between mpd_t and ASCII strings, + mpd_t formatting (allows UTF-8 fill character). 
+ memory.{c,h} -> Allocation handlers with overflow detection + and functions for switching between static + and dynamic mpd_t. + mpdecimal.{c,h} -> All (quiet) functions of the specification. + typearith.h -> Fast primitives for double word multiplication, + division etc. + + Visual Studio only: + ~~~~~~~~~~~~~~~~~~~ + vccompat.h -> snprintf <==> sprintf_s and similar things. + vcstdint.h -> stdint.h (included in VS 2010 but not in VS 2008). + vcdiv64.asm -> Double word division used in typearith.h. VS 2008 does + not allow inline asm for x64. Also, it does not provide + an intrinsic for double word division. + + Files for bignum arithmetic: + ---------------------------- + + The following files implement the Fast Number Theoretic Transform + used for multiplying coefficients with more than 1024 words (see + mpdecimal.c: _mpd_fntmul()). + + umodarith.h -> Fast low level routines for unsigned modular arithmetic. + numbertheory.{c,h} -> Routines for setting up the Number Theoretic Transform. + difradix2.{c,h} -> Decimation in frequency transform, used as the + "base case" by the following three files: + + fnt.{c,h} -> Transform arrays up to 4096 words. + sixstep.{c,h} -> Transform larger arrays of length 2**n. + fourstep.{c,h} -> Transform larger arrays of length 3 * 2**n. + + convolute.{c,h} -> Fast convolution using one of the three transform + functions. + transpose.{c,h} -> Transpositions needed for the sixstep algorithm. + crt.{c,h} -> Chinese Remainder Theorem: use information from three + transforms modulo three different primes to get the + final result. + + +Pointers to literature, proofs and more +======================================= + + literature/ + ----------- + + REFERENCES.txt -> List of relevant papers. + bignum.txt -> Explanation of the Fast Number Theoretic Transform (FNT). + fnt.py -> Verify constants used in the FNT; Python demo for the + O(N**2) discrete transform. 
+ + matrix-transform.txt -> Proof for the Matrix Fourier Transform used in + fourstep.c. + six-step.txt -> Show that the algorithm used in sixstep.c is + a variant of the Matrix Fourier Transform. + mulmod-64.txt -> Proof for the mulmod64 algorithm from + umodarith.h. + mulmod-ppro.txt -> Proof for the x87 FPU modular multiplication + from umodarith.h. + umodarith.lisp -> ACL2 proofs for many functions from umodarith.h. + + +Library Author +============== + + Stefan Krah + + + diff --git a/rpython/translator/c/src/libmpdec/basearith.c b/rpython/translator/c/src/libmpdec/basearith.c new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/basearith.c @@ -0,0 +1,658 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#include "mpdecimal.h" +#include +#include +#include +#include +#include "constants.h" +#include "memory.h" +#include "typearith.h" +#include "basearith.h" + + +/*********************************************************************/ +/* Calculations in base MPD_RADIX */ +/*********************************************************************/ + + +/* + * Knuth, TAOCP, Volume 2, 4.3.1: + * w := sum of u (len m) and v (len n) + * n > 0 and m >= n + * The calling function has to handle a possible final carry. + */ +mpd_uint_t +_mpd_baseadd(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n) +{ + mpd_uint_t s; + mpd_uint_t carry = 0; + mpd_size_t i; + + assert(n > 0 && m >= n); + + /* add n members of u and v */ + for (i = 0; i < n; i++) { + s = u[i] + (v[i] + carry); + carry = (s < u[i]) | (s >= MPD_RADIX); + w[i] = carry ? s-MPD_RADIX : s; + } + /* if there is a carry, propagate it */ + for (; carry && i < m; i++) { + s = u[i] + carry; + carry = (s == MPD_RADIX); + w[i] = carry ? 0 : s; + } + /* copy the rest of u */ + for (; i < m; i++) { + w[i] = u[i]; + } + + return carry; +} + +/* + * Add the contents of u to w. Carries are propagated further. The caller + * has to make sure that w is big enough. 
+ */ +void +_mpd_baseaddto(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n) +{ + mpd_uint_t s; + mpd_uint_t carry = 0; + mpd_size_t i; + + if (n == 0) return; + + /* add n members of u to w */ + for (i = 0; i < n; i++) { + s = w[i] + (u[i] + carry); + carry = (s < w[i]) | (s >= MPD_RADIX); + w[i] = carry ? s-MPD_RADIX : s; + } + /* if there is a carry, propagate it */ + for (; carry; i++) { + s = w[i] + carry; + carry = (s == MPD_RADIX); + w[i] = carry ? 0 : s; + } +} + +/* + * Add v to w (len m). The calling function has to handle a possible + * final carry. Assumption: m > 0. + */ +mpd_uint_t +_mpd_shortadd(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v) +{ + mpd_uint_t s; + mpd_uint_t carry; + mpd_size_t i; + + assert(m > 0); + + /* add v to w */ + s = w[0] + v; + carry = (s < v) | (s >= MPD_RADIX); + w[0] = carry ? s-MPD_RADIX : s; + + /* if there is a carry, propagate it */ + for (i = 1; carry && i < m; i++) { + s = w[i] + carry; + carry = (s == MPD_RADIX); + w[i] = carry ? 0 : s; + } + + return carry; +} + +/* Increment u. The calling function has to handle a possible carry. */ +mpd_uint_t +_mpd_baseincr(mpd_uint_t *u, mpd_size_t n) +{ + mpd_uint_t s; + mpd_uint_t carry = 1; + mpd_size_t i; + + assert(n > 0); + + /* if there is a carry, propagate it */ + for (i = 0; carry && i < n; i++) { + s = u[i] + carry; + carry = (s == MPD_RADIX); + u[i] = carry ? 0 : s; + } + + return carry; +} + +/* + * Knuth, TAOCP, Volume 2, 4.3.1: + * w := difference of u (len m) and v (len n). + * number in u >= number in v; + */ +void +_mpd_basesub(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n) +{ + mpd_uint_t d; + mpd_uint_t borrow = 0; + mpd_size_t i; + + assert(m > 0 && n > 0); + + /* subtract n members of v from u */ + for (i = 0; i < n; i++) { + d = u[i] - (v[i] + borrow); + borrow = (u[i] < d); + w[i] = borrow ? 
d + MPD_RADIX : d; + } + /* if there is a borrow, propagate it */ + for (; borrow && i < m; i++) { + d = u[i] - borrow; + borrow = (u[i] == 0); + w[i] = borrow ? MPD_RADIX-1 : d; + } + /* copy the rest of u */ + for (; i < m; i++) { + w[i] = u[i]; + } +} + +/* + * Subtract the contents of u from w. w is larger than u. Borrows are + * propagated further, but eventually w can absorb the final borrow. + */ +void +_mpd_basesubfrom(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n) +{ + mpd_uint_t d; + mpd_uint_t borrow = 0; + mpd_size_t i; + + if (n == 0) return; + + /* subtract n members of u from w */ + for (i = 0; i < n; i++) { + d = w[i] - (u[i] + borrow); + borrow = (w[i] < d); + w[i] = borrow ? d + MPD_RADIX : d; + } + /* if there is a borrow, propagate it */ + for (; borrow; i++) { + d = w[i] - borrow; + borrow = (w[i] == 0); + w[i] = borrow ? MPD_RADIX-1 : d; + } +} + +/* w := product of u (len n) and v (single word) */ +void +_mpd_shortmul(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, mpd_uint_t v) +{ + mpd_uint_t hi, lo; + mpd_uint_t carry = 0; + mpd_size_t i; + + assert(n > 0); + + for (i=0; i < n; i++) { + + _mpd_mul_words(&hi, &lo, u[i], v); + lo = carry + lo; + if (lo < carry) hi++; + + _mpd_div_words_r(&carry, &w[i], hi, lo); + } + w[i] = carry; +} + +/* + * Knuth, TAOCP, Volume 2, 4.3.1: + * w := product of u (len m) and v (len n) + * w must be initialized to zero + */ +void +_mpd_basemul(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n) +{ + mpd_uint_t hi, lo; + mpd_uint_t carry; + mpd_size_t i, j; + + assert(m > 0 && n > 0); + + for (j=0; j < n; j++) { + carry = 0; + for (i=0; i < m; i++) { + + _mpd_mul_words(&hi, &lo, u[i], v[j]); + lo = w[i+j] + lo; + if (lo < w[i+j]) hi++; + lo = carry + lo; + if (lo < carry) hi++; + + _mpd_div_words_r(&carry, &w[i+j], hi, lo); + } + w[j+m] = carry; + } +} + +/* + * Knuth, TAOCP Volume 2, 4.3.1, exercise 16: + * w := quotient of u (len n) divided by a single word v + */ 
+mpd_uint_t +_mpd_shortdiv(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, mpd_uint_t v) +{ + mpd_uint_t hi, lo; + mpd_uint_t rem = 0; + mpd_size_t i; + + assert(n > 0); + + for (i=n-1; i != MPD_SIZE_MAX; i--) { + + _mpd_mul_words(&hi, &lo, rem, MPD_RADIX); + lo = u[i] + lo; + if (lo < u[i]) hi++; + + _mpd_div_words(&w[i], &rem, hi, lo, v); + } + + return rem; +} + +/* + * Knuth, TAOCP Volume 2, 4.3.1: + * q, r := quotient and remainder of uconst (len nplusm) + * divided by vconst (len n) + * nplusm >= n + * + * If r is not NULL, r will contain the remainder. If r is NULL, the + * return value indicates if there is a remainder: 1 for true, 0 for + * false. A return value of -1 indicates an error. + */ +int +_mpd_basedivmod(mpd_uint_t *q, mpd_uint_t *r, + const mpd_uint_t *uconst, const mpd_uint_t *vconst, + mpd_size_t nplusm, mpd_size_t n) +{ + mpd_uint_t ustatic[MPD_MINALLOC_MAX]; + mpd_uint_t vstatic[MPD_MINALLOC_MAX]; + mpd_uint_t *u = ustatic; + mpd_uint_t *v = vstatic; + mpd_uint_t d, qhat, rhat, w2[2]; + mpd_uint_t hi, lo, x; + mpd_uint_t carry; + mpd_size_t i, j, m; + int retval = 0; + + assert(n > 1 && nplusm >= n); + m = sub_size_t(nplusm, n); + + /* D1: normalize */ + d = MPD_RADIX / (vconst[n-1] + 1); + + if (nplusm >= MPD_MINALLOC_MAX) { + if ((u = mpd_alloc(nplusm+1, sizeof *u)) == NULL) { + return -1; + } + } + if (n >= MPD_MINALLOC_MAX) { + if ((v = mpd_alloc(n+1, sizeof *v)) == NULL) { + mpd_free(u); + return -1; + } + } + + _mpd_shortmul(u, uconst, nplusm, d); + _mpd_shortmul(v, vconst, n, d); + + /* D2: loop */ + for (j=m; j != MPD_SIZE_MAX; j--) { + + /* D3: calculate qhat and rhat */ + rhat = _mpd_shortdiv(w2, u+j+n-1, 2, v[n-1]); + qhat = w2[1] * MPD_RADIX + w2[0]; + + while (1) { + if (qhat < MPD_RADIX) { + _mpd_singlemul(w2, qhat, v[n-2]); + if (w2[1] <= rhat) { + if (w2[1] != rhat || w2[0] <= u[j+n-2]) { + break; + } + } + } + qhat -= 1; + rhat += v[n-1]; + if (rhat < v[n-1] || rhat >= MPD_RADIX) { + break; + } + } + /* D4: multiply and 
subtract */ + carry = 0; + for (i=0; i <= n; i++) { + + _mpd_mul_words(&hi, &lo, qhat, v[i]); + + lo = carry + lo; + if (lo < carry) hi++; + + _mpd_div_words_r(&hi, &lo, hi, lo); + + x = u[i+j] - lo; + carry = (u[i+j] < x); + u[i+j] = carry ? x+MPD_RADIX : x; + carry += hi; + } + q[j] = qhat; + /* D5: test remainder */ + if (carry) { + q[j] -= 1; + /* D6: add back */ + (void)_mpd_baseadd(u+j, u+j, v, n+1, n); + } + } + + /* D8: unnormalize */ + if (r != NULL) { + _mpd_shortdiv(r, u, n, d); + /* we are not interested in the return value here */ + retval = 0; + } + else { + retval = !_mpd_isallzero(u, n); + } + + +if (u != ustatic) mpd_free(u); +if (v != vstatic) mpd_free(v); +return retval; +} + +/* + * Left shift of src by 'shift' digits; src may equal dest. + * + * dest := area of n mpd_uint_t with space for srcdigits+shift digits. + * src := coefficient with length m. + * + * The case splits in the function are non-obvious. The following + * equations might help: + * + * Let msdigits denote the number of digits in the most significant + * word of src. Then 1 <= msdigits <= rdigits. + * + * 1) shift = q * rdigits + r + * 2) srcdigits = qsrc * rdigits + msdigits + * 3) destdigits = shift + srcdigits + * = q * rdigits + r + qsrc * rdigits + msdigits + * = q * rdigits + (qsrc * rdigits + (r + msdigits)) + * + * The result has q zero words, followed by the coefficient that + * is left-shifted by r. The case r == 0 is trivial. For r > 0, it + * is important to keep in mind that we always read m source words, + * but write m+1 destination words if r + msdigits > rdigits, m words + * otherwise. 
+ */ +void +_mpd_baseshiftl(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t n, mpd_size_t m, + mpd_size_t shift) +{ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) + /* spurious uninitialized warnings */ + mpd_uint_t l=l, lprev=lprev, h=h; +#else + mpd_uint_t l, lprev, h; +#endif + mpd_uint_t q, r; + mpd_uint_t ph; + + assert(m > 0 && n >= m); + + _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS); + + if (r != 0) { + + ph = mpd_pow10[r]; + + --m; --n; + _mpd_divmod_pow10(&h, &lprev, src[m--], MPD_RDIGITS-r); + if (h != 0) { /* r + msdigits > rdigits <==> h != 0 */ + dest[n--] = h; + } + /* write m-1 shifted words */ + for (; m != MPD_SIZE_MAX; m--,n--) { + _mpd_divmod_pow10(&h, &l, src[m], MPD_RDIGITS-r); + dest[n] = ph * lprev + h; + lprev = l; + } + /* write least significant word */ + dest[q] = ph * lprev; + } + else { + while (--m != MPD_SIZE_MAX) { + dest[m+q] = src[m]; + } + } + + mpd_uint_zero(dest, q); +} + +/* + * Right shift of src by 'shift' digits; src may equal dest. + * Assumption: srcdigits-shift > 0. + * + * dest := area with space for srcdigits-shift digits. + * src := coefficient with length 'slen'. + * + * The case splits in the function rely on the following equations: + * + * Let msdigits denote the number of digits in the most significant + * word of src. Then 1 <= msdigits <= rdigits. + * + * 1) shift = q * rdigits + r + * 2) srcdigits = qsrc * rdigits + msdigits + * 3) destdigits = srcdigits - shift + * = qsrc * rdigits + msdigits - (q * rdigits + r) + * = (qsrc - q) * rdigits + msdigits - r + * + * Since destdigits > 0 and 1 <= msdigits <= rdigits: + * + * 4) qsrc >= q + * 5) qsrc == q ==> msdigits > r + * + * The result has slen-q words if msdigits > r, slen-q-1 words otherwise. 
+ */ +mpd_uint_t +_mpd_baseshiftr(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t slen, + mpd_size_t shift) +{ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) + /* spurious uninitialized warnings */ + mpd_uint_t l=l, h=h, hprev=hprev; /* low, high, previous high */ +#else + mpd_uint_t l, h, hprev; /* low, high, previous high */ +#endif + mpd_uint_t rnd, rest; /* rounding digit, rest */ + mpd_uint_t q, r; + mpd_size_t i, j; + mpd_uint_t ph; + + assert(slen > 0); + + _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS); + + rnd = rest = 0; + if (r != 0) { + + ph = mpd_pow10[MPD_RDIGITS-r]; + + _mpd_divmod_pow10(&hprev, &rest, src[q], r); + _mpd_divmod_pow10(&rnd, &rest, rest, r-1); + + if (rest == 0 && q > 0) { + rest = !_mpd_isallzero(src, q); + } + /* write slen-q-1 words */ + for (j=0,i=q+1; i 0) { + _mpd_divmod_pow10(&rnd, &rest, src[q-1], MPD_RDIGITS-1); + /* is there any non-zero digit below rnd? */ + if (rest == 0) rest = !_mpd_isallzero(src, q-1); + } + for (j = 0; j < slen-q; j++) { + dest[j] = src[q+j]; + } + } + + /* 0-4 ==> rnd+rest < 0.5 */ + /* 5 ==> rnd+rest == 0.5 */ + /* 6-9 ==> rnd+rest > 0.5 */ + return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd; +} + + +/*********************************************************************/ +/* Calculations in base b */ +/*********************************************************************/ + +/* + * Add v to w (len m). The calling function has to handle a possible + * final carry. Assumption: m > 0. + */ +mpd_uint_t +_mpd_shortadd_b(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v, mpd_uint_t b) +{ + mpd_uint_t s; + mpd_uint_t carry; + mpd_size_t i; + + assert(m > 0); + + /* add v to w */ + s = w[0] + v; + carry = (s < v) | (s >= b); + w[0] = carry ? s-b : s; + + /* if there is a carry, propagate it */ + for (i = 1; carry && i < m; i++) { + s = w[i] + carry; + carry = (s == b); + w[i] = carry ? 0 : s; + } + + return carry; +} + +/* w := product of u (len n) and v (single word). Return carry. 
*/ +mpd_uint_t +_mpd_shortmul_c(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, mpd_uint_t v) +{ + mpd_uint_t hi, lo; + mpd_uint_t carry = 0; + mpd_size_t i; + + assert(n > 0); + + for (i=0; i < n; i++) { + + _mpd_mul_words(&hi, &lo, u[i], v); + lo = carry + lo; + if (lo < carry) hi++; + + _mpd_div_words_r(&carry, &w[i], hi, lo); + } + + return carry; +} + +/* w := product of u (len n) and v (single word) */ +mpd_uint_t +_mpd_shortmul_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v, mpd_uint_t b) +{ + mpd_uint_t hi, lo; + mpd_uint_t carry = 0; + mpd_size_t i; + + assert(n > 0); + + for (i=0; i < n; i++) { + + _mpd_mul_words(&hi, &lo, u[i], v); + lo = carry + lo; + if (lo < carry) hi++; + + _mpd_div_words(&carry, &w[i], hi, lo, b); + } + + return carry; +} + +/* + * Knuth, TAOCP Volume 2, 4.3.1, exercise 16: + * w := quotient of u (len n) divided by a single word v + */ +mpd_uint_t +_mpd_shortdiv_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v, mpd_uint_t b) +{ + mpd_uint_t hi, lo; + mpd_uint_t rem = 0; + mpd_size_t i; + + assert(n > 0); + + for (i=n-1; i != MPD_SIZE_MAX; i--) { + + _mpd_mul_words(&hi, &lo, rem, b); + lo = u[i] + lo; + if (lo < u[i]) hi++; + + _mpd_div_words(&w[i], &rem, hi, lo, v); + } + + return rem; +} + + + diff --git a/rpython/translator/c/src/libmpdec/basearith.h b/rpython/translator/c/src/libmpdec/basearith.h new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/basearith.h @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#ifndef BASEARITH_H +#define BASEARITH_H + + +#include "mpdecimal.h" +#include +#include "typearith.h" + + +/* Internal header file: all symbols have local scope in the DSO */ +MPD_PRAGMA(MPD_HIDE_SYMBOLS_START) + + +mpd_uint_t _mpd_baseadd(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n); +void _mpd_baseaddto(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n); +mpd_uint_t _mpd_shortadd(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v); +mpd_uint_t _mpd_shortadd_b(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v, + mpd_uint_t b); +mpd_uint_t _mpd_baseincr(mpd_uint_t *u, mpd_size_t n); +void _mpd_basesub(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n); +void _mpd_basesubfrom(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n); +void _mpd_basemul(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v, + mpd_size_t m, mpd_size_t n); +void _mpd_shortmul(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v); 
+mpd_uint_t _mpd_shortmul_c(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v); +mpd_uint_t _mpd_shortmul_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v, mpd_uint_t b); +mpd_uint_t _mpd_shortdiv(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v); +mpd_uint_t _mpd_shortdiv_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, + mpd_uint_t v, mpd_uint_t b); +int _mpd_basedivmod(mpd_uint_t *q, mpd_uint_t *r, const mpd_uint_t *uconst, + const mpd_uint_t *vconst, mpd_size_t nplusm, mpd_size_t n); +void _mpd_baseshiftl(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t n, + mpd_size_t m, mpd_size_t shift); +mpd_uint_t _mpd_baseshiftr(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t slen, + mpd_size_t shift); + + + +#ifdef CONFIG_64 +extern const mpd_uint_t mprime_rdx; + +/* + * Algorithm from: Division by Invariant Integers using Multiplication, + * T. Granlund and P. L. Montgomery, Proceedings of the SIGPLAN '94 + * Conference on Programming Language Design and Implementation. + * + * http://gmplib.org/~tege/divcnst-pldi94.pdf + * + * Variables from the paper and their translations (See section 8): + * + * N := 64 + * d := MPD_RADIX + * l := 64 + * m' := floor((2**(64+64) - 1)/MPD_RADIX) - 2**64 + * + * Since N-l == 0: + * + * dnorm := d + * n2 := hi + * n10 := lo + * + * ACL2 proof: mpd-div-words-r-correct + */ +static inline void +_mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo) +{ + mpd_uint_t n_adj, h, l, t; + mpd_uint_t n1_neg; + + /* n1_neg = if lo >= 2**63 then MPD_UINT_MAX else 0 */ + n1_neg = (lo & (1ULL<<63)) ? 
MPD_UINT_MAX : 0; + /* n_adj = if lo >= 2**63 then lo+MPD_RADIX else lo */ + n_adj = lo + (n1_neg & MPD_RADIX); + + /* (h, l) = if lo >= 2**63 then m'*(hi+1) else m'*hi */ + _mpd_mul_words(&h, &l, mprime_rdx, hi-n1_neg); + l = l + n_adj; + if (l < n_adj) h++; + t = h + hi; + /* At this point t == qest, with q == qest or q == qest+1: + * 1) 0 <= 2**64*hi + lo - qest*MPD_RADIX < 2*MPD_RADIX + */ + + /* t = 2**64-1 - qest = 2**64 - (qest+1) */ + t = MPD_UINT_MAX - t; + + /* (h, l) = 2**64*MPD_RADIX - (qest+1)*MPD_RADIX */ + _mpd_mul_words(&h, &l, t, MPD_RADIX); + l = l + lo; + if (l < lo) h++; + h += hi; + h -= MPD_RADIX; + /* (h, l) = 2**64*hi + lo - (qest+1)*MPD_RADIX (mod 2**128) + * Case q == qest+1: + * a) h == 0, l == r + * b) q := h - t == qest+1 + * c) r := l + * Case q == qest: + * a) h == MPD_UINT_MAX, l == 2**64-(MPD_RADIX-r) + * b) q := h - t == qest + * c) r := l + MPD_RADIX = r + */ + + *q = (h - t); + *r = l + (MPD_RADIX & h); +} +#else +static inline void +_mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo) +{ + _mpd_div_words(q, r, hi, lo, MPD_RADIX); +} +#endif + + +/* Multiply two single base MPD_RADIX words, store result in array w[2]. */ +static inline void +_mpd_singlemul(mpd_uint_t w[2], mpd_uint_t u, mpd_uint_t v) +{ + mpd_uint_t hi, lo; + + _mpd_mul_words(&hi, &lo, u, v); + _mpd_div_words_r(&w[1], &w[0], hi, lo); +} + +/* Multiply u (len 2) and v (len m, 1 <= m <= 2). 
*/ +static inline void +_mpd_mul_2_le2(mpd_uint_t w[4], mpd_uint_t u[2], mpd_uint_t v[2], mpd_ssize_t m) +{ + mpd_uint_t hi, lo; + + _mpd_mul_words(&hi, &lo, u[0], v[0]); + _mpd_div_words_r(&w[1], &w[0], hi, lo); + + _mpd_mul_words(&hi, &lo, u[1], v[0]); + lo = w[1] + lo; + if (lo < w[1]) hi++; + _mpd_div_words_r(&w[2], &w[1], hi, lo); + if (m == 1) return; + + _mpd_mul_words(&hi, &lo, u[0], v[1]); + lo = w[1] + lo; + if (lo < w[1]) hi++; + _mpd_div_words_r(&w[3], &w[1], hi, lo); + + _mpd_mul_words(&hi, &lo, u[1], v[1]); + lo = w[2] + lo; + if (lo < w[2]) hi++; + lo = w[3] + lo; + if (lo < w[3]) hi++; + _mpd_div_words_r(&w[3], &w[2], hi, lo); +} + + +/* + * Test if all words from data[len-1] to data[0] are zero. If len is 0, nothing + * is tested and the coefficient is regarded as "all zero". + */ +static inline int +_mpd_isallzero(const mpd_uint_t *data, mpd_ssize_t len) +{ + while (--len >= 0) { + if (data[len] != 0) return 0; + } + return 1; +} + +/* + * Test if all full words from data[len-1] to data[0] are MPD_RADIX-1 + * (all nines). Return true if len == 0. + */ +static inline int +_mpd_isallnine(const mpd_uint_t *data, mpd_ssize_t len) +{ + while (--len >= 0) { + if (data[len] != MPD_RADIX-1) return 0; + } + return 1; +} + + +MPD_PRAGMA(MPD_HIDE_SYMBOLS_END) /* restore previous scope rules */ + + +#endif /* BASEARITH_H */ + + + diff --git a/rpython/translator/c/src/libmpdec/bits.h b/rpython/translator/c/src/libmpdec/bits.h new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/bits.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#ifndef BITS_H +#define BITS_H + + +#include "mpdecimal.h" +#include + + +/* Check if n is a power of 2. */ +static inline int +ispower2(mpd_size_t n) +{ + return n != 0 && (n & (n-1)) == 0; +} + +#if defined(ANSI) +/* + * Return the most significant bit position of n from 0 to 31 (63). + * Assumptions: n != 0. + */ +static inline int +mpd_bsr(mpd_size_t n) +{ + int pos = 0; + mpd_size_t tmp; + +#ifdef CONFIG_64 + tmp = n >> 32; + if (tmp != 0) { n = tmp; pos += 32; } +#endif + tmp = n >> 16; + if (tmp != 0) { n = tmp; pos += 16; } + tmp = n >> 8; + if (tmp != 0) { n = tmp; pos += 8; } + tmp = n >> 4; + if (tmp != 0) { n = tmp; pos += 4; } + tmp = n >> 2; + if (tmp != 0) { n = tmp; pos += 2; } + tmp = n >> 1; + if (tmp != 0) { n = tmp; pos += 1; } + + return pos + (int)n - 1; +} + +/* + * Return the least significant bit position of n from 0 to 31 (63). + * Assumptions: n != 0. 
+ */ +static inline int +mpd_bsf(mpd_size_t n) +{ + int pos; + +#ifdef CONFIG_64 + pos = 63; + if (n & 0x00000000FFFFFFFFULL) { pos -= 32; } else { n >>= 32; } + if (n & 0x000000000000FFFFULL) { pos -= 16; } else { n >>= 16; } + if (n & 0x00000000000000FFULL) { pos -= 8; } else { n >>= 8; } + if (n & 0x000000000000000FULL) { pos -= 4; } else { n >>= 4; } + if (n & 0x0000000000000003ULL) { pos -= 2; } else { n >>= 2; } + if (n & 0x0000000000000001ULL) { pos -= 1; } +#else + pos = 31; + if (n & 0x000000000000FFFFUL) { pos -= 16; } else { n >>= 16; } + if (n & 0x00000000000000FFUL) { pos -= 8; } else { n >>= 8; } + if (n & 0x000000000000000FUL) { pos -= 4; } else { n >>= 4; } + if (n & 0x0000000000000003UL) { pos -= 2; } else { n >>= 2; } + if (n & 0x0000000000000001UL) { pos -= 1; } +#endif + return pos; +} +/* END ANSI */ + +#elif defined(ASM) +/* + * Bit scan reverse. Assumptions: a != 0. + */ +static inline int +mpd_bsr(mpd_size_t a) +{ + mpd_size_t retval; + + __asm__ ( +#ifdef CONFIG_64 + "bsrq %1, %0\n\t" +#else + "bsr %1, %0\n\t" +#endif + :"=r" (retval) + :"r" (a) + :"cc" + ); + + return (int)retval; +} + +/* + * Bit scan forward. Assumptions: a != 0. + */ +static inline int +mpd_bsf(mpd_size_t a) +{ + mpd_size_t retval; + + __asm__ ( +#ifdef CONFIG_64 + "bsfq %1, %0\n\t" +#else + "bsf %1, %0\n\t" +#endif + :"=r" (retval) + :"r" (a) + :"cc" + ); + + return (int)retval; +} +/* END ASM */ + +#elif defined(MASM) +#include +/* + * Bit scan reverse. Assumptions: a != 0. + */ +static inline int __cdecl +mpd_bsr(mpd_size_t a) +{ + unsigned long retval; + +#ifdef CONFIG_64 + _BitScanReverse64(&retval, a); +#else + _BitScanReverse(&retval, a); +#endif + + return (int)retval; +} + +/* + * Bit scan forward. Assumptions: a != 0. 
+ */ +static inline int __cdecl +mpd_bsf(mpd_size_t a) +{ + unsigned long retval; + +#ifdef CONFIG_64 + _BitScanForward64(&retval, a); +#else + _BitScanForward(&retval, a); +#endif + + return (int)retval; +} +/* END MASM (_MSC_VER) */ +#else + #error "missing preprocessor definitions" +#endif /* BSR/BSF */ + + +#endif /* BITS_H */ + + + diff --git a/rpython/translator/c/src/libmpdec/constants.c b/rpython/translator/c/src/libmpdec/constants.c new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/constants.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + + +#include "mpdecimal.h" +#include +#include "constants.h" + + +#if defined(CONFIG_64) + + /* number-theory.c */ + const mpd_uint_t mpd_moduli[3] = { + 18446744069414584321ULL, 18446744056529682433ULL, 18446742974197923841ULL + }; + const mpd_uint_t mpd_roots[3] = {7ULL, 10ULL, 19ULL}; + + /* crt.c */ + const mpd_uint_t INV_P1_MOD_P2 = 18446744055098026669ULL; + const mpd_uint_t INV_P1P2_MOD_P3 = 287064143708160ULL; + const mpd_uint_t LH_P1P2 = 18446744052234715137ULL; /* (P1*P2) % 2^64 */ + const mpd_uint_t UH_P1P2 = 18446744052234715141ULL; /* (P1*P2) / 2^64 */ + + /* transpose.c */ + const mpd_size_t mpd_bits[64] = { + 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, + 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, + 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, + 2147483648ULL, 4294967296ULL, 8589934592ULL, 17179869184ULL, 34359738368ULL, + 68719476736ULL, 137438953472ULL, 274877906944ULL, 549755813888ULL, + 1099511627776ULL, 2199023255552ULL, 4398046511104, 8796093022208ULL, + 17592186044416ULL, 35184372088832ULL, 70368744177664ULL, 140737488355328ULL, + 281474976710656ULL, 562949953421312ULL, 1125899906842624ULL, + 2251799813685248ULL, 4503599627370496ULL, 9007199254740992ULL, + 18014398509481984ULL, 36028797018963968ULL, 72057594037927936ULL, + 144115188075855872ULL, 288230376151711744ULL, 576460752303423488ULL, + 1152921504606846976ULL, 2305843009213693952ULL, 4611686018427387904ULL, + 9223372036854775808ULL + }; + + /* mpdecimal.c */ + const mpd_uint_t mpd_pow10[MPD_RDIGITS+1] = { + 1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000, + 10000000000ULL,100000000000ULL,1000000000000ULL,10000000000000ULL, + 100000000000000ULL,1000000000000000ULL,10000000000000000ULL, + 100000000000000000ULL,1000000000000000000ULL,10000000000000000000ULL + }; + + /* magic number for constant division by MPD_RADIX */ + const mpd_uint_t mprime_rdx = 15581492618384294730ULL; + +#elif 
defined(CONFIG_32) + + /* number-theory.c */ + const mpd_uint_t mpd_moduli[3] = {2113929217UL, 2013265921UL, 1811939329UL}; + const mpd_uint_t mpd_roots[3] = {5UL, 31UL, 13UL}; + + /* PentiumPro modular multiplication: These constants have to be loaded as + * 80 bit long doubles, which are not supported by certain compilers. */ + const uint32_t mpd_invmoduli[3][3] = { + {4293885170U, 2181570688U, 16352U}, /* ((long double) 1 / 2113929217UL) */ + {1698898177U, 2290649223U, 16352U}, /* ((long double) 1 / 2013265921UL) */ + {2716021846U, 2545165803U, 16352U} /* ((long double) 1 / 1811939329UL) */ + }; + + const float MPD_TWO63 = 9223372036854775808.0; /* 2^63 */ + + /* crt.c */ + const mpd_uint_t INV_P1_MOD_P2 = 2013265901UL; + const mpd_uint_t INV_P1P2_MOD_P3 = 54UL; + const mpd_uint_t LH_P1P2 = 4127195137UL; /* (P1*P2) % 2^32 */ + const mpd_uint_t UH_P1P2 = 990904320UL; /* (P1*P2) / 2^32 */ + + /* transpose.c */ + const mpd_size_t mpd_bits[32] = { + 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, + 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, + 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, + 2147483648UL + }; + + /* mpdecimal.c */ + const mpd_uint_t mpd_pow10[MPD_RDIGITS+1] = { + 1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000 + }; + +#else + #error "CONFIG_64 or CONFIG_32 must be defined." 
+#endif + +const char *mpd_round_string[MPD_ROUND_GUARD] = { + "ROUND_UP", /* round away from 0 */ + "ROUND_DOWN", /* round toward 0 (truncate) */ + "ROUND_CEILING", /* round toward +infinity */ + "ROUND_FLOOR", /* round toward -infinity */ + "ROUND_HALF_UP", /* 0.5 is rounded up */ + "ROUND_HALF_DOWN", /* 0.5 is rounded down */ + "ROUND_HALF_EVEN", /* 0.5 is rounded to even */ + "ROUND_05UP", /* round zero or five away from 0 */ + "ROUND_TRUNC", /* truncate, but set infinity */ +}; + +const char *mpd_clamp_string[MPD_CLAMP_GUARD] = { + "CLAMP_DEFAULT", + "CLAMP_IEEE_754" +}; + + diff --git a/rpython/translator/c/src/libmpdec/constants.h b/rpython/translator/c/src/libmpdec/constants.h new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/constants.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#ifndef CONSTANTS_H +#define CONSTANTS_H + + +#include "mpdecimal.h" + + +/* Internal header file: all symbols have local scope in the DSO */ +MPD_PRAGMA(MPD_HIDE_SYMBOLS_START) + + +/* choice of optimized functions */ +#if defined(CONFIG_64) +/* x64 */ + #define MULMOD(a, b) x64_mulmod(a, b, umod) + #define MULMOD2C(a0, a1, w) x64_mulmod2c(a0, a1, w, umod) + #define MULMOD2(a0, b0, a1, b1) x64_mulmod2(a0, b0, a1, b1, umod) + #define POWMOD(base, exp) x64_powmod(base, exp, umod) + #define SETMODULUS(modnum) std_setmodulus(modnum, &umod) + #define SIZE3_NTT(x0, x1, x2, w3table) std_size3_ntt(x0, x1, x2, w3table, umod) +#elif defined(PPRO) +/* PentiumPro (or later) gcc inline asm */ + #define MULMOD(a, b) ppro_mulmod(a, b, &dmod, dinvmod) + #define MULMOD2C(a0, a1, w) ppro_mulmod2c(a0, a1, w, &dmod, dinvmod) + #define MULMOD2(a0, b0, a1, b1) ppro_mulmod2(a0, b0, a1, b1, &dmod, dinvmod) + #define POWMOD(base, exp) ppro_powmod(base, exp, &dmod, dinvmod) + #define SETMODULUS(modnum) ppro_setmodulus(modnum, &umod, &dmod, dinvmod) + #define SIZE3_NTT(x0, x1, x2, w3table) ppro_size3_ntt(x0, x1, x2, w3table, umod, &dmod, dinvmod) +#else + /* ANSI C99 */ + #define MULMOD(a, b) std_mulmod(a, b, umod) + #define MULMOD2C(a0, a1, w) std_mulmod2c(a0, a1, w, umod) + #define MULMOD2(a0, b0, a1, b1) std_mulmod2(a0, b0, a1, b1, umod) + #define POWMOD(base, exp) std_powmod(base, exp, umod) + #define SETMODULUS(modnum) std_setmodulus(modnum, &umod) + 
#define SIZE3_NTT(x0, x1, x2, w3table) std_size3_ntt(x0, x1, x2, w3table, umod) +#endif + +/* PentiumPro (or later) gcc inline asm */ +extern const float MPD_TWO63; +extern const uint32_t mpd_invmoduli[3][3]; + +enum {P1, P2, P3}; + +extern const mpd_uint_t mpd_moduli[]; +extern const mpd_uint_t mpd_roots[]; +extern const mpd_size_t mpd_bits[]; +extern const mpd_uint_t mpd_pow10[]; + +extern const mpd_uint_t INV_P1_MOD_P2; +extern const mpd_uint_t INV_P1P2_MOD_P3; +extern const mpd_uint_t LH_P1P2; +extern const mpd_uint_t UH_P1P2; + + +MPD_PRAGMA(MPD_HIDE_SYMBOLS_END) /* restore previous scope rules */ + + +#endif /* CONSTANTS_H */ + + + diff --git a/rpython/translator/c/src/libmpdec/context.c b/rpython/translator/c/src/libmpdec/context.c new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/context.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#include "mpdecimal.h" +#include +#include +#include + + +void +mpd_dflt_traphandler(mpd_context_t *ctx UNUSED) +{ + raise(SIGFPE); +} + +void (* mpd_traphandler)(mpd_context_t *) = mpd_dflt_traphandler; + + +/* Set guaranteed minimum number of coefficient words. The function may + be used once at program start. Setting MPD_MINALLOC to out-of-bounds + values is a catastrophic error, so in that case the function exits rather + than relying on the user to check a return value. 
*/ +void +mpd_setminalloc(mpd_ssize_t n) +{ + static int minalloc_is_set = 0; + + if (minalloc_is_set) { + mpd_err_warn("mpd_setminalloc: ignoring request to set " + "MPD_MINALLOC a second time\n"); + return; + } + if (n < MPD_MINALLOC_MIN || n > MPD_MINALLOC_MAX) { + mpd_err_fatal("illegal value for MPD_MINALLOC"); /* GCOV_NOT_REACHED */ + } + MPD_MINALLOC = n; + minalloc_is_set = 1; +} + +void +mpd_init(mpd_context_t *ctx, mpd_ssize_t prec) +{ + mpd_ssize_t ideal_minalloc; + + mpd_defaultcontext(ctx); + + if (!mpd_qsetprec(ctx, prec)) { + mpd_addstatus_raise(ctx, MPD_Invalid_context); + return; + } + + ideal_minalloc = 2 * ((prec+MPD_RDIGITS-1) / MPD_RDIGITS); + if (ideal_minalloc < MPD_MINALLOC_MIN) ideal_minalloc = MPD_MINALLOC_MIN; + if (ideal_minalloc > MPD_MINALLOC_MAX) ideal_minalloc = MPD_MINALLOC_MAX; + + mpd_setminalloc(ideal_minalloc); +} + +void +mpd_maxcontext(mpd_context_t *ctx) +{ + ctx->prec=MPD_MAX_PREC; + ctx->emax=MPD_MAX_EMAX; + ctx->emin=MPD_MIN_EMIN; + ctx->round=MPD_ROUND_HALF_EVEN; + ctx->traps=MPD_Traps; + ctx->status=0; + ctx->newtrap=0; + ctx->clamp=0; + ctx->allcr=1; +} + +void +mpd_defaultcontext(mpd_context_t *ctx) +{ + ctx->prec=2*MPD_RDIGITS; + ctx->emax=MPD_MAX_EMAX; + ctx->emin=MPD_MIN_EMIN; + ctx->round=MPD_ROUND_HALF_UP; + ctx->traps=MPD_Traps; + ctx->status=0; + ctx->newtrap=0; + ctx->clamp=0; + ctx->allcr=1; +} + +void +mpd_basiccontext(mpd_context_t *ctx) +{ + ctx->prec=9; + ctx->emax=MPD_MAX_EMAX; + ctx->emin=MPD_MIN_EMIN; + ctx->round=MPD_ROUND_HALF_UP; + ctx->traps=MPD_Traps|MPD_Clamped; + ctx->status=0; + ctx->newtrap=0; + ctx->clamp=0; + ctx->allcr=1; +} + +int +mpd_ieee_context(mpd_context_t *ctx, int bits) +{ + if (bits <= 0 || bits > MPD_IEEE_CONTEXT_MAX_BITS || bits % 32) { + return -1; + } + + ctx->prec = 9 * (bits/32) - 2; + ctx->emax = 3 * ((mpd_ssize_t)1<<(bits/16+3)); + ctx->emin = 1 - ctx->emax; + ctx->round=MPD_ROUND_HALF_EVEN; + ctx->traps=0; + ctx->status=0; + ctx->newtrap=0; + ctx->clamp=1; + ctx->allcr=1; 
+ + return 0; +} + +mpd_ssize_t +mpd_getprec(const mpd_context_t *ctx) +{ + return ctx->prec; +} + +mpd_ssize_t +mpd_getemax(const mpd_context_t *ctx) +{ + return ctx->emax; +} + +mpd_ssize_t +mpd_getemin(const mpd_context_t *ctx) +{ + return ctx->emin; +} + +int +mpd_getround(const mpd_context_t *ctx) +{ + return ctx->round; +} + +uint32_t +mpd_gettraps(const mpd_context_t *ctx) +{ + return ctx->traps; +} + +uint32_t +mpd_getstatus(const mpd_context_t *ctx) +{ + return ctx->status; +} + +int +mpd_getclamp(const mpd_context_t *ctx) +{ + return ctx->clamp; +} + +int +mpd_getcr(const mpd_context_t *ctx) +{ + return ctx->allcr; +} + + +int +mpd_qsetprec(mpd_context_t *ctx, mpd_ssize_t prec) +{ + if (prec <= 0 || prec > MPD_MAX_PREC) { + return 0; + } + ctx->prec = prec; + return 1; +} + +int +mpd_qsetemax(mpd_context_t *ctx, mpd_ssize_t emax) +{ + if (emax < 0 || emax > MPD_MAX_EMAX) { + return 0; + } + ctx->emax = emax; + return 1; +} + +int +mpd_qsetemin(mpd_context_t *ctx, mpd_ssize_t emin) +{ + if (emin > 0 || emin < MPD_MIN_EMIN) { + return 0; + } + ctx->emin = emin; + return 1; +} + +int +mpd_qsetround(mpd_context_t *ctx, int round) +{ + if (!(0 <= round && round < MPD_ROUND_GUARD)) { + return 0; + } + ctx->round = round; + return 1; +} + +int +mpd_qsettraps(mpd_context_t *ctx, uint32_t traps) +{ + if (traps > MPD_Max_status) { + return 0; + } + ctx->traps = traps; + return 1; +} + +int +mpd_qsetstatus(mpd_context_t *ctx, uint32_t flags) +{ + if (flags > MPD_Max_status) { + return 0; + } + ctx->status = flags; + return 1; +} + +int +mpd_qsetclamp(mpd_context_t *ctx, int c) +{ + if (c != 0 && c != 1) { + return 0; + } + ctx->clamp = c; + return 1; +} + +int +mpd_qsetcr(mpd_context_t *ctx, int c) +{ + if (c != 0 && c != 1) { + return 0; + } + ctx->allcr = c; + return 1; +} + + +void +mpd_addstatus_raise(mpd_context_t *ctx, uint32_t flags) +{ + ctx->status |= flags; + if (flags&ctx->traps) { + ctx->newtrap = (flags&ctx->traps); + mpd_traphandler(ctx); + } +} + + 
diff --git a/rpython/translator/c/src/libmpdec/convolute.c b/rpython/translator/c/src/libmpdec/convolute.c new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/convolute.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#include "mpdecimal.h" +#include +#include "bits.h" +#include "constants.h" +#include "fnt.h" +#include "fourstep.h" +#include "numbertheory.h" +#include "sixstep.h" +#include "umodarith.h" +#include "convolute.h" + + +/* Bignum: Fast convolution using the Number Theoretic Transform. Used for + the multiplication of very large coefficients. */ + + +/* Convolute the data in c1 and c2. Result is in c1. 
*/ +int +fnt_convolute(mpd_uint_t *c1, mpd_uint_t *c2, mpd_size_t n, int modnum) +{ + int (*fnt)(mpd_uint_t *, mpd_size_t, int); + int (*inv_fnt)(mpd_uint_t *, mpd_size_t, int); +#ifdef PPRO + double dmod; + uint32_t dinvmod[3]; +#endif + mpd_uint_t n_inv, umod; + mpd_size_t i; + + + SETMODULUS(modnum); + n_inv = POWMOD(n, (umod-2)); + + if (ispower2(n)) { + if (n > SIX_STEP_THRESHOLD) { + fnt = six_step_fnt; + inv_fnt = inv_six_step_fnt; + } + else { + fnt = std_fnt; + inv_fnt = std_inv_fnt; + } + } + else { + fnt = four_step_fnt; + inv_fnt = inv_four_step_fnt; + } + + if (!fnt(c1, n, modnum)) { + return 0; + } + if (!fnt(c2, n, modnum)) { + return 0; + } + for (i = 0; i < n-1; i += 2) { + mpd_uint_t x0 = c1[i]; + mpd_uint_t y0 = c2[i]; + mpd_uint_t x1 = c1[i+1]; + mpd_uint_t y1 = c2[i+1]; + MULMOD2(&x0, y0, &x1, y1); + c1[i] = x0; + c1[i+1] = x1; + } + + if (!inv_fnt(c1, n, modnum)) { + return 0; + } + for (i = 0; i < n-3; i += 4) { + mpd_uint_t x0 = c1[i]; + mpd_uint_t x1 = c1[i+1]; + mpd_uint_t x2 = c1[i+2]; + mpd_uint_t x3 = c1[i+3]; + MULMOD2C(&x0, &x1, n_inv); + MULMOD2C(&x2, &x3, n_inv); + c1[i] = x0; + c1[i+1] = x1; + c1[i+2] = x2; + c1[i+3] = x3; + } + + return 1; +} + +/* Autoconvolute the data in c1. Result is in c1. 
*/ +int +fnt_autoconvolute(mpd_uint_t *c1, mpd_size_t n, int modnum) +{ + int (*fnt)(mpd_uint_t *, mpd_size_t, int); + int (*inv_fnt)(mpd_uint_t *, mpd_size_t, int); +#ifdef PPRO + double dmod; + uint32_t dinvmod[3]; +#endif + mpd_uint_t n_inv, umod; + mpd_size_t i; + + + SETMODULUS(modnum); + n_inv = POWMOD(n, (umod-2)); + + if (ispower2(n)) { + if (n > SIX_STEP_THRESHOLD) { + fnt = six_step_fnt; + inv_fnt = inv_six_step_fnt; + } + else { + fnt = std_fnt; + inv_fnt = std_inv_fnt; + } + } + else { + fnt = four_step_fnt; + inv_fnt = inv_four_step_fnt; + } + + if (!fnt(c1, n, modnum)) { + return 0; + } + for (i = 0; i < n-1; i += 2) { + mpd_uint_t x0 = c1[i]; + mpd_uint_t x1 = c1[i+1]; + MULMOD2(&x0, x0, &x1, x1); + c1[i] = x0; + c1[i+1] = x1; + } + + if (!inv_fnt(c1, n, modnum)) { + return 0; + } + for (i = 0; i < n-3; i += 4) { + mpd_uint_t x0 = c1[i]; + mpd_uint_t x1 = c1[i+1]; + mpd_uint_t x2 = c1[i+2]; + mpd_uint_t x3 = c1[i+3]; + MULMOD2C(&x0, &x1, n_inv); + MULMOD2C(&x2, &x3, n_inv); + c1[i] = x0; + c1[i+1] = x1; + c1[i+2] = x2; + c1[i+3] = x3; + } + + return 1; +} + + diff --git a/rpython/translator/c/src/libmpdec/convolute.h b/rpython/translator/c/src/libmpdec/convolute.h new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/libmpdec/convolute.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2008-2012 Stefan Krah. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE From noreply at buildbot.pypy.org Sun May 11 00:27:33 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:33 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add _decimal.Decimal, empty. Message-ID: <20140510222733.46F3E1C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71446:3a643e398e1d Date: 2014-05-01 21:30 +0200 http://bitbucket.org/pypy/pypy/changeset/3a643e398e1d/ Log: Add _decimal.Decimal, empty. diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -5,5 +5,6 @@ } interpleveldefs = { + 'Decimal': 'interp_decimal.W_Decimal', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', } diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,3 +1,13 @@ from rpython.rlib import rmpdec +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, + descr_set_dict, descr_del_dict) + IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS + +class W_Decimal(W_Root): + pass + +W_Decimal.typedef = TypeDef( + 'Decimal') diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -4,3 +4,7 @@ def test_constants(self): import _decimal assert _decimal.IEEE_CONTEXT_MAX_BITS > 3 + + def test_type(self): + import _decimal + assert isinstance(_decimal.Decimal, type) From noreply at buildbot.pypy.org Sun May 11 00:27:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 
May 2014 00:27:34 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Start decimal.context, so far only with a skeleton of the "flags" attribute. Message-ID: <20140510222734.97BD01C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71447:a858f7de905a Date: 2014-05-01 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/a858f7de905a/ Log: Start decimal.context, so far only with a skeleton of the "flags" attribute. diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -6,5 +6,6 @@ interpleveldefs = { 'Decimal': 'interp_decimal.W_Decimal', + 'getcontext': 'interp_context.getcontext', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', } diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/interp_context.py @@ -0,0 +1,61 @@ +from rpython.rlib import rmpdec +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w) +from pypy.interpreter.executioncontext import ExecutionContext + + +# The SignalDict is a MutableMapping that provides access to the +# mpd_context_t flags, which reside in the context object. +# When a new context is created, context.traps and context.flags are +# initialized to new SignalDicts. +# Once a SignalDict is tied to a context, it cannot be deleted. 
+class W_SignalDictMixin(W_Root): + pass + +def descr_new_signaldict(space, w_subtype): + w_result = space.allocate_instance(W_SignalDictMixin, w_subtype) + W_SignalDictMixin.__init__(w_result) + return w_result + +W_SignalDictMixin.typedef = TypeDef( + 'SignalDictMixin', + __new__ = interp2app(descr_new_signaldict), + ) + + +class State: + def __init__(self, space): + w_import = space.builtin.get('__import__') + w_collections = space.call_function(w_import, + space.wrap('collections')) + w_MutableMapping = space.getattr(w_collections, + space.wrap('MutableMapping')) + self.W_SignalDict = space.call_function( + space.w_type, space.wrap("SignalDict"), + space.newtuple([space.gettypeobject(W_SignalDictMixin.typedef), + w_MutableMapping]), + space.newdict()) + +def state_get(space): + return space.fromcache(State) + + +class W_Context(W_Root): + def __init__(self, space): + self.w_flags = space.call_function(state_get(space).W_SignalDict) + +W_Context.typedef = TypeDef( + 'Context', + flags=interp_attrproperty_w('w_flags', W_Context), + ) + + +ExecutionContext.decimal_context = None + +def getcontext(space): + ec = space.getexecutioncontext() + if not ec.decimal_context: + # Set up a new thread local context + ec.decimal_context = W_Context(space) + return ec.decimal_context diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -8,3 +8,11 @@ def test_type(self): import _decimal assert isinstance(_decimal.Decimal, type) + + def test_contextflags(self): + import _decimal + from collections.abc import MutableMapping + flags = _decimal.getcontext().flags + assert type(flags).__name__ == 'SignalDict' + bases = type(flags).__bases__ + assert bases[1] is MutableMapping From noreply at buildbot.pypy.org Sun May 11 00:27:36 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:36 +0200 (CEST) Subject: 
[pypy-commit] pypy decimal-libmpdec: Add decimal "Signals", i.e. exceptions. Message-ID: <20140510222736.04CE61C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71448:2ca4490e70b9 Date: 2014-05-01 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/2ca4490e70b9/ Log: Add decimal "Signals", i.e. exceptions. diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -7,5 +7,11 @@ interpleveldefs = { 'Decimal': 'interp_decimal.W_Decimal', 'getcontext': 'interp_context.getcontext', + 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', } + for name in ('DecimalException', 'Clamped', 'Rounded', 'Inexact', + 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', + 'InvalidOperation', 'FloatOperation'): + interpleveldefs[name] = 'interp_signals.get(space).w_%s' % name + diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/interp_signals.py @@ -0,0 +1,50 @@ +class SignalState: + def __init__(self, space): + self.w_DecimalException = space.call_function( + space.w_type, space.wrap("DecimalException"), + space.newtuple([space.w_ArithmeticError]), + space.newdict()) + self.w_Clamped = space.call_function( + space.w_type, space.wrap("Clamped"), + space.newtuple([self.w_DecimalException]), + space.newdict()) + self.w_Rounded = space.call_function( + space.w_type, space.wrap("Rounded"), + space.newtuple([self.w_DecimalException]), + space.newdict()) + self.w_Inexact = space.call_function( + space.w_type, space.wrap("Inexact"), + space.newtuple([self.w_DecimalException]), + space.newdict()) + self.w_Subnormal = space.call_function( + space.w_type, space.wrap("Subnormal"), + space.newtuple([self.w_DecimalException]), + space.newdict()) + self.w_Underflow = space.call_function( + space.w_type, 
space.wrap("Underflow"), + space.newtuple([self.w_Inexact, + self.w_Rounded, + self.w_Subnormal]), + space.newdict()) + self.w_Overflow = space.call_function( + space.w_type, space.wrap("Overflow"), + space.newtuple([self.w_Inexact, + self.w_Rounded]), + space.newdict()) + self.w_DivisionByZero = space.call_function( + space.w_type, space.wrap("DivisionByZero"), + space.newtuple([self.w_DecimalException, + space.w_ZeroDivisionError]), + space.newdict()) + self.w_InvalidOperation = space.call_function( + space.w_type, space.wrap("InvalidOperation"), + space.newtuple([self.w_DecimalException]), + space.newdict()) + self.w_FloatOperation = space.call_function( + space.w_type, space.wrap("FloatOperation"), + space.newtuple([self.w_DecimalException, + space.w_TypeError]), + space.newdict()) + +def get(space): + return space.fromcache(SignalState) diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -16,3 +16,13 @@ assert type(flags).__name__ == 'SignalDict' bases = type(flags).__bases__ assert bases[1] is MutableMapping + + def test_exceptions(self): + import _decimal + for name in ('Clamped', 'Rounded', 'Inexact', 'Subnormal', + 'Underflow', 'Overflow', 'DivisionByZero', + 'InvalidOperation', 'FloatOperation'): + ex = getattr(_decimal, name) + assert issubclass(ex, _decimal.DecimalException) + assert issubclass(ex, ArithmeticError) + From noreply at buildbot.pypy.org Sun May 11 00:27:37 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:37 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add enough of context constructor, Message-ID: <20140510222737.504B01C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71449:494911ddea62 Date: 2014-05-02 01:04 +0200 http://bitbucket.org/pypy/pypy/changeset/494911ddea62/ Log: Add enough of context 
constructor, so that test_decimal can be imported and run tests. diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -1,4 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule +from rpython.rlib import rmpdec class Module(MixedModule): appleveldefs = { @@ -6,10 +7,15 @@ interpleveldefs = { 'Decimal': 'interp_decimal.W_Decimal', + 'Context': 'interp_context.W_Context', 'getcontext': 'interp_context.getcontext', + 'setcontext': 'interp_context.setcontext', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', } + for name in rmpdec.ROUND_CONSTANTS: + interpleveldefs[name] = 'space.wrap(%r)' % ( + getattr(rmpdec, 'MPD_' + name),) for name in ('DecimalException', 'Clamped', 'Rounded', 'Inexact', 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', 'InvalidOperation', 'FloatOperation'): diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -45,9 +45,21 @@ def __init__(self, space): self.w_flags = space.call_function(state_get(space).W_SignalDict) + def copy_w(self, space): + w_copy = W_Context(space) + # XXX incomplete + return w_copy + +def descr_new_context(space, w_subtype, __args__): + w_result = space.allocate_instance(W_Context, w_subtype) + W_Context.__init__(w_result, space) + return w_result + W_Context.typedef = TypeDef( 'Context', + copy=interp2app(W_Context.copy_w), flags=interp_attrproperty_w('w_flags', W_Context), + __new__ = interp2app(descr_new_context), ) @@ -59,3 +71,7 @@ # Set up a new thread local context ec.decimal_context = W_Context(space) return ec.decimal_context + +def setcontext(space, w_context): + ec = space.getexecutioncontext() + ec.decimal_context = w_context diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- 
a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -9,6 +9,14 @@ import _decimal assert isinstance(_decimal.Decimal, type) + def test_context(self): + import _decimal + context = _decimal.Context( + prec=9, rounding=_decimal.ROUND_HALF_EVEN, + traps=dict.fromkeys(_decimal.getcontext().flags.keys(), 0)) + _decimal.setcontext(context) + assert _decimal.getcontext() is context + def test_contextflags(self): import _decimal from collections.abc import MutableMapping diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -34,10 +34,21 @@ libraries=['m'], ) + +ROUND_CONSTANTS = ( + 'ROUND_UP', 'ROUND_DOWN', 'ROUND_CEILING', 'ROUND_FLOOR', + 'ROUND_HALF_UP', 'ROUND_HALF_DOWN', 'ROUND_HALF_EVEN', + 'ROUND_05UP', 'ROUND_TRUNC') + class CConfig: _compilation_info_ = eci MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') + for name in ROUND_CONSTANTS: + name = 'MPD_' + name + locals()[name] = platform.ConstantInteger(name) + + globals().update(platform.configure(CConfig)) diff --git a/rpython/rlib/test/test_rmpdec.py b/rpython/rlib/test/test_rmpdec.py --- a/rpython/rlib/test/test_rmpdec.py +++ b/rpython/rlib/test/test_rmpdec.py @@ -1,1 +1,6 @@ from rpython.rlib import rmpdec + +class TestMpdec: + def test_constants(self): + assert 'ROUND_HALF_EVEN' in rmpdec.ROUND_CONSTANTS + assert isinstance(rmpdec.MPD_ROUND_HALF_EVEN, int) From noreply at buildbot.pypy.org Sun May 11 00:27:38 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:38 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More context methods, will be able to start test_decimal. 
Message-ID: <20140510222738.919BB1C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71450:2ea4370d6126 Date: 2014-05-02 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/2ea4370d6126/ Log: More context methods, will be able to start test_decimal. diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -37,7 +37,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy" - "faulthandler", + "faulthandler", "_decimal", ]) translation_modules = default_modules.copy() diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -12,10 +12,10 @@ 'setcontext': 'interp_context.setcontext', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', + 'MAX_PREC': 'space.wrap(interp_decimal.MAX_PREC)', } for name in rmpdec.ROUND_CONSTANTS: - interpleveldefs[name] = 'space.wrap(%r)' % ( - getattr(rmpdec, 'MPD_' + name),) + interpleveldefs[name] = 'space.wrap(%r)' % name for name in ('DecimalException', 'Clamped', 'Rounded', 'Inexact', 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', 'InvalidOperation', 'FloatOperation'): diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -1,7 +1,11 @@ from rpython.rlib import rmpdec +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import (TypeDef, interp_attrproperty_w) +from pypy.interpreter.gateway import 
interp2app, unwrap_spec +from pypy.interpreter.typedef import ( + TypeDef, GetSetProperty, interp_attrproperty_w) from pypy.interpreter.executioncontext import ExecutionContext @@ -40,16 +44,80 @@ def state_get(space): return space.fromcache(State) +ROUND_CONSTANTS = unrolling_iterable([ + (name, getattr(rmpdec, 'MPD_' + name)) + for name in rmpdec.ROUND_CONSTANTS]) class W_Context(W_Root): def __init__(self, space): + self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, 1, flavor='raw', + track_allocation=False) self.w_flags = space.call_function(state_get(space).W_SignalDict) + def __del__(self): + if self.ctx: + lltype.free(self.ctx, flavor='raw') + def copy_w(self, space): w_copy = W_Context(space) # XXX incomplete return w_copy + def get_prec(self, space): + return space.wrap(rmpdec.mpd_getprec(self.ctx)) + + def set_prec(self, space, w_prec): + prec = space.int_w(w_prec) + if not rmpdec.mpd_qsetprec(self.ctx, prec): + raise oefmt(space.w_ValueError, + "valid range for prec is [1, MAX_PREC]") + + def get_rounding(self, space): + return space.wrap(rmpdec.mpd_getround(self.ctx)) + + def set_rounding(self, space, w_rounding): + rounding = space.str_w(w_rounding) + for name, value in ROUND_CONSTANTS: + if name == rounding: + break + else: + raise oefmt(space.w_TypeError, + "valid values for rounding are: " + "[ROUND_CEILING, ROUND_FLOOR, ROUND_UP, ROUND_DOWN," + "ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN," + "ROUND_05UP]") + if not rmpdec.mpd_qsetround(self.ctx, value): + raise oefmt(space.w_RuntimeError, + "internal error in context.set_rounding") + + def get_emin(self, space): + return space.wrap(rmpdec.mpd_getemin(self.ctx)) + + def set_emin(self, space, w_emin): + emin = space.int_w(w_emin) + if not rmpdec.mpd_qsetemin(self.ctx, emin): + raise oefmt(space.w_ValueError, + "valid range for Emin is [MIN_EMIN, 0]") + + def get_emax(self, space): + return space.wrap(rmpdec.mpd_getemax(self.ctx)) + + def set_emax(self, space, w_emax): + emax = 
space.int_w(w_emax) + if not rmpdec.mpd_qsetemax(self.ctx, emax): + raise oefmt(space.w_ValueError, + "valid range for Emax is [0, MAX_EMAX]") + + def get_clamp(self, space): + return space.wrap(rmpdec.mpd_getclamp(self.ctx)) + + def set_clamp(self, space, w_clamp): + clamp = space.c_int_w(w_clamp) + if not rmpdec.mpd_qsetclamp(self.ctx, clamp): + raise oefmt(space.w_ValueError, + "valid values for clamp are 0 or 1") + + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) W_Context.__init__(w_result, space) @@ -59,6 +127,11 @@ 'Context', copy=interp2app(W_Context.copy_w), flags=interp_attrproperty_w('w_flags', W_Context), + prec=GetSetProperty(W_Context.get_prec, W_Context.set_prec), + rounding=GetSetProperty(W_Context.get_rounding, W_Context.set_rounding), + Emin=GetSetProperty(W_Context.get_emin, W_Context.set_emin), + Emax=GetSetProperty(W_Context.get_emax, W_Context.set_emax), + clamp=GetSetProperty(W_Context.get_clamp, W_Context.set_clamp), __new__ = interp2app(descr_new_context), ) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -5,6 +5,7 @@ IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS +MAX_PREC = rmpdec.MPD_MAX_PREC class W_Decimal(W_Root): pass diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -25,6 +25,20 @@ bases = type(flags).__bases__ assert bases[1] is MutableMapping + def test_context_changes(self): + import _decimal + context = _decimal.getcontext() + context.prec + context.prec = 30 + context.rounding + context.rounding = _decimal.ROUND_HALF_UP + context.Emin + context.Emin = -100 + context.Emax + context.Emax = 100 + context.clamp + context.clamp = 1 + def test_exceptions(self): import _decimal for name 
in ('Clamped', 'Rounded', 'Inexact', 'Subnormal', diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -2,6 +2,7 @@ import sys from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.tool import rffi_platform as platform from rpython.conftest import cdir @@ -10,8 +11,11 @@ compile_extra = [] if sys.maxsize > 1<<32: compile_extra.append("-DCONFIG_64") + # This suppose a x64 platform with gcc inline assembler. + compile_extra.append("-DASM") else: compile_extra.append("-DCONFIG_32") + compile_extra.append("-DANSI") eci = ExternalCompilationInfo( includes=['src/libmpdec/mpdecimal.h'], @@ -30,6 +34,12 @@ libdir.join('crt.c'), libdir.join('memory.c'), ], + export_symbols=[ + "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", + "mpd_getclamp", + "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", + "mpd_qsetclamp", + ], compile_extra=compile_extra, libraries=['m'], ) @@ -45,10 +55,42 @@ MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') + MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') for name in ROUND_CONSTANTS: name = 'MPD_' + name locals()[name] = platform.ConstantInteger(name) + MPD_CONTEXT_T = platform.Struct('mpd_context_t', + []) globals().update(platform.configure(CConfig)) + + +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info=eci, **kwds) + +MPD_CONTEXT_PTR = rffi.CArrayPtr(MPD_CONTEXT_T) + +mpd_getprec = external( + 'mpd_getprec', [MPD_CONTEXT_PTR], rffi.SSIZE_T) +mpd_getemin = external( + 'mpd_getemin', [MPD_CONTEXT_PTR], rffi.SSIZE_T) +mpd_getemax = external( + 'mpd_getemax', [MPD_CONTEXT_PTR], rffi.SSIZE_T) +mpd_getround = external( + 'mpd_getround', [MPD_CONTEXT_PTR], rffi.INT) +mpd_getclamp = external( + 'mpd_getclamp', [MPD_CONTEXT_PTR], rffi.INT) + +mpd_qsetprec = external( + 'mpd_qsetprec', 
[MPD_CONTEXT_PTR, rffi.SSIZE_T], rffi.INT) +mpd_qsetemin = external( + 'mpd_qsetemin', [MPD_CONTEXT_PTR, rffi.SSIZE_T], rffi.INT) +mpd_qsetemax = external( + 'mpd_qsetemax', [MPD_CONTEXT_PTR, rffi.SSIZE_T], rffi.INT) +mpd_qsetround = external( + 'mpd_qsetround', [MPD_CONTEXT_PTR, rffi.INT], rffi.INT) +mpd_qsetclamp = external( + 'mpd_qsetclamp', [MPD_CONTEXT_PTR, rffi.INT], rffi.INT) + diff --git a/rpython/translator/c/src/libmpdec/README-pypy.txt b/rpython/translator/c/src/libmpdec/README-pypy.txt --- a/rpython/translator/c/src/libmpdec/README-pypy.txt +++ b/rpython/translator/c/src/libmpdec/README-pypy.txt @@ -1,3 +1,5 @@ This libmpdec directory was directly copied from CPython. -pyconfig.h was added, with a default configuration which works on Linux. +- pyconfig.h was added, with a default configuration which works on Linux. + +- in mpdecimal.h the "MPD_HIDE_SYMBOLS" pragmas have been disabled. diff --git a/rpython/translator/c/src/libmpdec/mpdecimal.h b/rpython/translator/c/src/libmpdec/mpdecimal.h --- a/rpython/translator/c/src/libmpdec/mpdecimal.h +++ b/rpython/translator/c/src/libmpdec/mpdecimal.h @@ -86,7 +86,8 @@ /* This header file is internal for the purpose of building _decimal.so. * All symbols should have local scope in the DSO. */ -MPD_PRAGMA(MPD_HIDE_SYMBOLS_START) +/* Removed for PyPy */ +// MPD_PRAGMA(MPD_HIDE_SYMBOLS_START) #if !defined(LEGACY_COMPILER) @@ -807,7 +808,8 @@ int mpd_resize_zero(mpd_t *result, mpd_ssize_t size, mpd_context_t *ctx); -MPD_PRAGMA(MPD_HIDE_SYMBOLS_END) /* restore previous scope rules */ +/* Removed for PyPy */ +// MPD_PRAGMA(MPD_HIDE_SYMBOLS_END) /* restore previous scope rules */ #ifdef __cplusplus From noreply at buildbot.pypy.org Sun May 11 00:27:39 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:39 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Really start the Decimal type: some constructors. 
Message-ID: <20140510222739.EFAB41C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71451:563cb51db85c Date: 2014-05-02 23:59 +0200 http://bitbucket.org/pypy/pypy/changeset/563cb51db85c/ Log: Really start the Decimal type: some constructors. diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -1,5 +1,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rmpdec +from pypy.module._decimal import interp_signals + class Module(MixedModule): appleveldefs = { @@ -16,8 +18,6 @@ } for name in rmpdec.ROUND_CONSTANTS: interpleveldefs[name] = 'space.wrap(%r)' % name - for name in ('DecimalException', 'Clamped', 'Rounded', 'Inexact', - 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', - 'InvalidOperation', 'FloatOperation'): + for name in interp_signals.SIGNAL_NAMES: interpleveldefs[name] = 'interp_signals.get(space).w_%s' % name diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -7,6 +7,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, interp_attrproperty_w) from pypy.interpreter.executioncontext import ExecutionContext +from pypy.module._decimal import interp_signals # The SignalDict is a MutableMapping that provides access to the @@ -50,13 +51,23 @@ class W_Context(W_Root): def __init__(self, space): - self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, 1, flavor='raw', + self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, flavor='raw', + zero=True, track_allocation=False) self.w_flags = space.call_function(state_get(space).W_SignalDict) def __del__(self): if self.ctx: - lltype.free(self.ctx, flavor='raw') + lltype.free(self.ctx, flavor='raw', track_allocation=False) + + def addstatus(self, space, status): + "Add resulting status 
to context, and eventually raise an exception." + self.ctx.c_status |= status + if status & rmpdec.MPD_Malloc_error: + raise OperationError(space.w_MemoryError, space.w_None) + if status & self.ctx.c_traps: + raise interp_signals.flags_as_exception( + space, self.ctx.c_traps & status) def copy_w(self, space): w_copy = W_Context(space) @@ -148,3 +159,20 @@ def setcontext(space, w_context): ec = space.getexecutioncontext() ec.decimal_context = w_context + +class ConvContext: + def __init__(self, context, exact): + self.exact = exact + if self.exact: + self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, flavor='raw', + zero=True) + rmpdec.mpd_maxcontext(self.ctx) + else: + self.ctx = context.ctx + + def __enter__(self): + return self.ctx + + def __exit__(self, *args): + if self.exact: + lltype.free(self.ctx, flavor='raw') diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,14 +1,101 @@ from rpython.rlib import rmpdec +from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, descr_set_dict, descr_del_dict) +from pypy.module._decimal import interp_context IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS MAX_PREC = rmpdec.MPD_MAX_PREC +# DEC_MINALLOC >= MPD_MINALLOC +DEC_MINALLOC = 4 + +def ensure_context(space, w_context): + context = space.interp_w(interp_context.W_Context, w_context, + can_be_None=True) + if context is None: + context = interp_context.getcontext(space) + return context class W_Decimal(W_Root): - pass + hash = -1 + + def __init__(self, space): + self.mpd = lltype.malloc(rmpdec.MPD_PTR.TO, flavor='raw') + self.data = lltype.malloc(rffi.UINTP.TO, DEC_MINALLOC, flavor='raw') + rffi.setintfield(self.mpd, 'c_flags', 
+ rmpdec.MPD_STATIC | rmpdec.MPD_STATIC_DATA) + self.mpd.c_exp = 0 + self.mpd.c_digits = 0 + self.mpd.c_len = 0 + self.mpd.c_alloc = DEC_MINALLOC + self.mpd.c_data = self.data + + def __del__(self): + if self.mpd: + lltype.free(self.mpd, flavor='raw') + if self.data: + lltype.free(self.data, flavor='raw') + + def compare(self, space, w_other, op): + if not isinstance(w_other, W_Decimal): # So far + return space.w_NotImplemented + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + r = rmpdec.mpd_qcmp(self.mpd, w_other.mpd, status_ptr) + if op == 'eq': + return space.wrap(r == 0) + else: + return space.w_NotImplemented + + def descr_eq(self, space, w_other): + return self.compare(space, w_other, 'eq') + + +# Constructors +def decimal_from_ssize(space, w_subtype, value, context, exact=True): + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + with interp_context.ConvContext(context, exact) as ctx: + rmpdec.mpd_qset_ssize(w_result.mpd, value, ctx, status_ptr) + context.addstatus(space, status_ptr[0]) + + return w_result + +def decimal_from_cstring(space, w_subtype, value, context, exact=True): + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + with interp_context.ConvContext(context, exact) as ctx: + rmpdec.mpd_qset_string(w_result.mpd, value, ctx, status_ptr) + context.addstatus(space, status_ptr[0]) + return w_result + +def decimal_from_unicode(space, w_subtype, w_value, context, exact=True, + strip_whitespace=True): + s = space.str_w(w_value) # XXX numeric_as_ascii() is different + if strip_whitespace: + s = s.strip() + return decimal_from_cstring(space, w_subtype, s, context, exact=exact) + +def decimal_from_object(space, w_subtype, w_value, context, exact=True): + if w_value is None: + return 
decimal_from_ssize(space, w_subtype, 0, context, exact=exact) + elif space.isinstance_w(w_value, space.w_unicode): + return decimal_from_unicode(space, w_subtype, w_value, context, + exact=True, strip_whitespace=True) + + at unwrap_spec(w_context=WrappedDefault(None)) +def descr_new_decimal(space, w_subtype, w_value=None, w_context=None): + context = ensure_context(space, w_context) + return decimal_from_object(space, w_subtype, w_value, context, + exact=True) W_Decimal.typedef = TypeDef( - 'Decimal') + 'Decimal', + __new__ = interp2app(descr_new_decimal), + __eq__ = interp2app(W_Decimal.descr_eq), + ) diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -1,3 +1,12 @@ +SIGNAL_NAMES = ( + 'DecimalException', 'Clamped', 'Rounded', 'Inexact', + 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', + 'InvalidOperation', 'FloatOperation') + +def flags_as_exception(space, flags): + raise ValueError(hex(flags)) + + class SignalState: def __init__(self, space): self.w_DecimalException = space.call_function( diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/test/test_decimal.py @@ -0,0 +1,8 @@ +class AppTestExplicitConstruction: + spaceconfig = dict(usemodules=('_decimal',)) + + def test_explicit_empty(self): + import _decimal + Decimal = _decimal.Decimal + assert Decimal() == Decimal("0") + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -2,7 +2,7 @@ import sys from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform as platform from rpython.conftest import cdir @@ -25,6 +25,7 @@ 
libdir.join('convolute.c'), libdir.join('constants.c'), libdir.join('context.c'), + libdir.join('io.c'), libdir.join('fourstep.c'), libdir.join('sixstep.c'), libdir.join('transpose.c'), @@ -35,10 +36,11 @@ libdir.join('memory.c'), ], export_symbols=[ - "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", - "mpd_getclamp", - "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", - "mpd_qsetclamp", + "mpd_qset_ssize", "mpd_qset_string", + "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", + "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", + "mpd_maxcontext", + "mpd_qcmp", ], compile_extra=compile_extra, libraries=['m'], @@ -56,13 +58,28 @@ MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') + MPD_STATIC = platform.ConstantInteger('MPD_STATIC') + MPD_STATIC_DATA = platform.ConstantInteger('MPD_STATIC_DATA') + + MPD_Malloc_error = platform.ConstantInteger('MPD_Malloc_error') for name in ROUND_CONSTANTS: name = 'MPD_' + name locals()[name] = platform.ConstantInteger(name) + MPD_T = platform.Struct('mpd_t', + [('flags', rffi.UINT), + ('exp', rffi.SSIZE_T), + ('digits', rffi.SSIZE_T), + ('len', rffi.SSIZE_T), + ('alloc', rffi.SSIZE_T), + ('data', rffi.UINTP), + ]) MPD_CONTEXT_T = platform.Struct('mpd_context_t', - []) + [('traps', rffi.UINT), + ('status', rffi.UINT), + ]) + globals().update(platform.configure(CConfig)) @@ -70,8 +87,16 @@ def external(name, args, result, **kwds): return rffi.llexternal(name, args, result, compilation_info=eci, **kwds) -MPD_CONTEXT_PTR = rffi.CArrayPtr(MPD_CONTEXT_T) +MPD_PTR = lltype.Ptr(MPD_T) +MPD_CONTEXT_PTR = lltype.Ptr(MPD_CONTEXT_T) +# Initialization +mpd_qset_ssize = external( + 'mpd_qset_ssize', [MPD_PTR, rffi.SSIZE_T, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qset_string = external( + 'mpd_qset_string', [MPD_PTR, rffi.CCHARP, MPD_CONTEXT_PTR, rffi.UINTP], 
lltype.Void) + +# Context operations mpd_getprec = external( 'mpd_getprec', [MPD_CONTEXT_PTR], rffi.SSIZE_T) mpd_getemin = external( @@ -94,3 +119,9 @@ mpd_qsetclamp = external( 'mpd_qsetclamp', [MPD_CONTEXT_PTR, rffi.INT], rffi.INT) +mpd_maxcontext = external( + 'mpd_maxcontext', [MPD_CONTEXT_PTR], lltype.Void) + +# Operations +mpd_qcmp = external( + 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) From noreply at buildbot.pypy.org Sun May 11 00:27:41 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:41 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More constructors to Decimal, add __str__ Message-ID: <20140510222741.47FF31C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71452:46b622916a28 Date: 2014-05-05 21:22 +0200 http://bitbucket.org/pypy/pypy/changeset/46b622916a28/ Log: More constructors to Decimal, add __str__ diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -434,11 +434,8 @@ self._value = value self.setup(w_type) - def get_w_value(self, space): - w_value = self._w_value - if w_value is None: - self._w_value = w_value = space.wrap(self._value) - return w_value + def _compute_value(self, space): + return self._value @specialize.memo() def get_operr_class(valuefmt): diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -12,12 +12,13 @@ 'Context': 'interp_context.W_Context', 'getcontext': 'interp_context.getcontext', 'setcontext': 'interp_context.setcontext', + 'DecimalException': 'interp_signals.get(space).w_DecimalException', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', 'MAX_PREC': 'space.wrap(interp_decimal.MAX_PREC)', } for name in rmpdec.ROUND_CONSTANTS: interpleveldefs[name] = 'space.wrap(%r)' % name - for name in 
interp_signals.SIGNAL_NAMES: + for name, flag in interp_signals.SIGNAL_MAP: interpleveldefs[name] = 'interp_signals.get(space).w_%s' % name diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -1,7 +1,7 @@ from rpython.rlib import rmpdec from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import ( @@ -55,6 +55,7 @@ zero=True, track_allocation=False) self.w_flags = space.call_function(state_get(space).W_SignalDict) + self.capitals = 0 def __del__(self): if self.ctx: @@ -62,12 +63,15 @@ def addstatus(self, space, status): "Add resulting status to context, and eventually raise an exception." 
- self.ctx.c_status |= status - if status & rmpdec.MPD_Malloc_error: + new_status = (rffi.cast(lltype.Signed, status) | + rffi.cast(lltype.Signed, self.ctx.c_status)) + self.ctx.c_status = rffi.cast(rffi.UINT, new_status) + if new_status & rmpdec.MPD_Malloc_error: raise OperationError(space.w_MemoryError, space.w_None) - if status & self.ctx.c_traps: - raise interp_signals.flags_as_exception( - space, self.ctx.c_traps & status) + to_trap = (rffi.cast(lltype.Signed, status) & + rffi.cast(lltype.Signed, self.ctx.c_traps)) + if to_trap: + raise interp_signals.flags_as_exception(space, to_trap) def copy_w(self, space): w_copy = W_Context(space) @@ -158,21 +162,45 @@ def setcontext(space, w_context): ec = space.getexecutioncontext() - ec.decimal_context = w_context + ec.decimal_context = space.interp_w(W_Context, w_context) + +def ensure_context(space, w_context): + context = space.interp_w(W_Context, w_context, + can_be_None=True) + if context is None: + context = getcontext(space) + return context class ConvContext: - def __init__(self, context, exact): + def __init__(self, space, mpd, context, exact): + self.space = space + self.mpd = mpd + self.context = context self.exact = exact + + def __enter__(self): if self.exact: self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, flavor='raw', zero=True) rmpdec.mpd_maxcontext(self.ctx) else: - self.ctx = context.ctx - - def __enter__(self): - return self.ctx + self.ctx = self.context.ctx + self.status_ptr = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 1, + flavor='raw', zero=True) + return self.ctx, self.status_ptr def __exit__(self, *args): if self.exact: lltype.free(self.ctx, flavor='raw') + # we want exact results + status = rffi.cast(lltype.Signed, self.status_ptr[0]) + if status & (rmpdec.MPD_Inexact | + rmpdec.MPD_Rounded | + rmpdec.MPD_Clamped): + rmpdec.seterror(self.mpd, + rmpdec.MPD_Invalid_operation, status_ptr) + status = rffi.cast(lltype.Signed, self.status_ptr[0]) + lltype.free(self.status_ptr, flavor='raw') + 
status &= rmpdec.MPD_Errors + # May raise a DecimalException + self.context.addstatus(self.space, status) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,6 +1,7 @@ -from rpython.rlib import rmpdec +from rpython.rlib import rmpdec, rarithmetic, rbigint from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, descr_set_dict, descr_del_dict) @@ -12,13 +13,6 @@ # DEC_MINALLOC >= MPD_MINALLOC DEC_MINALLOC = 4 -def ensure_context(space, w_context): - context = space.interp_w(interp_context.W_Context, w_context, - can_be_None=True) - if context is None: - context = interp_context.getcontext(space) - return context - class W_Decimal(W_Root): hash = -1 @@ -39,6 +33,19 @@ if self.data: lltype.free(self.data, flavor='raw') + def descr_str(self, space): + context = interp_context.getcontext(space) + with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as cp_ptr: + size = rmpdec.mpd_to_sci_size(cp_ptr, self.mpd, context.capitals) + if size < 0: + raise OperationError(space.w_MemoryError, space.w_None) + cp = cp_ptr[0] + try: + result = rffi.charpsize2str(cp, size) + finally: + rmpdec.mpd_free(cp) + return space.wrap(result) # Convert bytes to unicode + def compare(self, space, w_other, op): if not isinstance(w_other, W_Decimal): # So far return space.w_NotImplemented @@ -57,21 +64,18 @@ def decimal_from_ssize(space, w_subtype, value, context, exact=True): w_result = space.allocate_instance(W_Decimal, w_subtype) W_Decimal.__init__(w_result, space) - with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: - with interp_context.ConvContext(context, exact) as ctx: - 
rmpdec.mpd_qset_ssize(w_result.mpd, value, ctx, status_ptr) - context.addstatus(space, status_ptr[0]) - + with interp_context.ConvContext( + space, w_result.mpd, context, exact) as (ctx, status_ptr): + rmpdec.mpd_qset_ssize(w_result.mpd, value, ctx, status_ptr) return w_result def decimal_from_cstring(space, w_subtype, value, context, exact=True): w_result = space.allocate_instance(W_Decimal, w_subtype) W_Decimal.__init__(w_result, space) - with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: - with interp_context.ConvContext(context, exact) as ctx: - rmpdec.mpd_qset_string(w_result.mpd, value, ctx, status_ptr) - context.addstatus(space, status_ptr[0]) + with interp_context.ConvContext( + space, w_result.mpd, context, exact) as (ctx, status_ptr): + rmpdec.mpd_qset_string(w_result.mpd, value, ctx, status_ptr) return w_result def decimal_from_unicode(space, w_subtype, w_value, context, exact=True, @@ -81,21 +85,58 @@ s = s.strip() return decimal_from_cstring(space, w_subtype, s, context, exact=exact) +def decimal_from_long(space, w_subtype, w_value, context, exact=True): + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + + value = space.bigint_w(w_value) + + with interp_context.ConvContext( + space, w_result.mpd, context, exact) as (ctx, status_ptr): + if value.sign == -1: + size = value.numdigits() + sign = rmpdec.MPD_NEG + else: + size = value.numdigits() + sign = rmpdec.MPD_POS + if rbigint.UDIGIT_TYPE.BITS == 32: + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, size) as digits: + for i in range(size): + digits[i] = value.udigit(i) + rmpdec.mpd_qimport_u32( + w_result.mpd, digits, size, sign, PyLong_BASE, + ctx, status_ptr) + elif rbigint.UDIGIT_TYPE.BITS == 64: + # No mpd_qimport_u64, so we convert to a string. 
+ return decimal_from_cstring(space, w_subtype, value.str(), + context, exact=exact) + + else: + raise ValueError("Bad rbigint size") + return w_result + def decimal_from_object(space, w_subtype, w_value, context, exact=True): if w_value is None: return decimal_from_ssize(space, w_subtype, 0, context, exact=exact) elif space.isinstance_w(w_value, space.w_unicode): return decimal_from_unicode(space, w_subtype, w_value, context, - exact=True, strip_whitespace=True) + exact=exact, strip_whitespace=exact) + elif space.isinstance_w(w_value, space.w_int): + return decimal_from_long(space, w_subtype, w_value, context, + exact=exact) + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_value)) @unwrap_spec(w_context=WrappedDefault(None)) def descr_new_decimal(space, w_subtype, w_value=None, w_context=None): - context = ensure_context(space, w_context) + context = interp_context.ensure_context(space, w_context) return decimal_from_object(space, w_subtype, w_value, context, exact=True) W_Decimal.typedef = TypeDef( 'Decimal', __new__ = interp2app(descr_new_decimal), + __str__ = interp2app(W_Decimal.descr_str), __eq__ = interp2app(W_Decimal.descr_eq), ) diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -1,9 +1,37 @@ -SIGNAL_NAMES = ( - 'DecimalException', 'Clamped', 'Rounded', 'Inexact', - 'Subnormal', 'Underflow', 'Overflow', 'DivisionByZero', - 'InvalidOperation', 'FloatOperation') +from rpython.rlib import rmpdec +from rpython.rlib.unroll import unrolling_iterable + +SIGNAL_MAP = unrolling_iterable([ + ('InvalidOperation', rmpdec.MPD_IEEE_Invalid_operation), + ('FloatOperation', rmpdec.MPD_Float_operation), + ('DivisionByZero', rmpdec.MPD_Division_by_zero), + ('Overflow', rmpdec.MPD_Overflow), + ('Underflow', rmpdec.MPD_Underflow), + ('Subnormal', rmpdec.MPD_Subnormal), + ('Inexact', 
rmpdec.MPD_Inexact), + ('Rounded', rmpdec.MPD_Rounded), + ('Clamped', rmpdec.MPD_Clamped), + ]) +# Exceptions that inherit from InvalidOperation +COND_MAP = unrolling_iterable([ + ('InvalidOperation', rmpdec.MPD_Invalid_operation), + ('ConversionSyntax', rmpdec.MPD_Conversion_syntax), + ('DivisionImpossible', rmpdec.MPD_Division_impossible), + ('DivisionUndefined', rmpdec.MPD_Division_undefined), + ('InvalidContext', rmpdec.MPD_Invalid_context), + ]) def flags_as_exception(space, flags): + w_exc = None + err_list = [] + for name, flag in SIGNAL_MAP: + if flags & flag: + w_exc = getattr(get(space), 'w_' + name) + if w_exc is None: + raise oefmt(space.w_RuntimeError, + "invalid error flag") + + raise ValueError(hex(flags)) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -1,8 +1,44 @@ class AppTestExplicitConstruction: spaceconfig = dict(usemodules=('_decimal',)) + def setup_class(cls): + space = cls.space + cls.w_decimal = space.call_function(space.builtin.get('__import__'), + space.wrap("_decimal")) + cls.w_Decimal = space.getattr(cls.w_decimal, space.wrap("Decimal")) + def test_explicit_empty(self): - import _decimal - Decimal = _decimal.Decimal + Decimal = self.Decimal assert Decimal() == Decimal("0") + def test_explicit_from_None(self): + Decimal = self.Decimal + raises(TypeError, Decimal, None) + + def test_explicit_from_int(self): + Decimal = self.decimal.Decimal + + #positive + d = Decimal(45) + assert str(d) == '45' + + #very large positive + d = Decimal(500000123) + assert str(d) == '500000123' + + #negative + d = Decimal(-45) + assert str(d) == '-45' + + #zero + d = Decimal(0) + assert str(d) == '0' + + # single word longs + for n in range(0, 32): + for sign in (-1, 1): + for x in range(-5, 5): + i = sign * (2**n + x) + d = Decimal(i) + assert str(d) == str(i) + diff --git a/rpython/rlib/rmpdec.py 
b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -40,6 +40,7 @@ "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", + "mpd_to_sci_size", "mpd_qcmp", ], compile_extra=compile_extra, @@ -52,21 +53,34 @@ 'ROUND_HALF_UP', 'ROUND_HALF_DOWN', 'ROUND_HALF_EVEN', 'ROUND_05UP', 'ROUND_TRUNC') +STATUS_FLAGS_CONSTANTS = ( + 'MPD_Clamped', 'MPD_Conversion_syntax', 'MPD_Division_by_zero', + 'MPD_Division_impossible', 'MPD_Division_undefined', 'MPD_Fpu_error', + 'MPD_Inexact', 'MPD_Invalid_context', 'MPD_Invalid_operation', + 'MPD_Malloc_error', 'MPD_Not_implemented', 'MPD_Overflow', + 'MPD_Rounded', 'MPD_Subnormal', 'MPD_Underflow', 'MPD_Max_status', + 'MPD_IEEE_Invalid_operation', 'MPD_Errors') + class CConfig: _compilation_info_ = eci MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') + + # Flags + MPD_POS = platform.ConstantInteger('MPD_POS') + MPD_NEG = platform.ConstantInteger('MPD_NEG') MPD_STATIC = platform.ConstantInteger('MPD_STATIC') MPD_STATIC_DATA = platform.ConstantInteger('MPD_STATIC_DATA') - MPD_Malloc_error = platform.ConstantInteger('MPD_Malloc_error') - for name in ROUND_CONSTANTS: name = 'MPD_' + name locals()[name] = platform.ConstantInteger(name) + for name in STATUS_FLAGS_CONSTANTS: + locals()[name] = platform.ConstantInteger(name) + MPD_T = platform.Struct('mpd_t', [('flags', rffi.UINT), ('exp', rffi.SSIZE_T), @@ -83,6 +97,7 @@ globals().update(platform.configure(CConfig)) +MPD_Float_operation = MPD_Not_implemented def external(name, args, result, **kwds): return rffi.llexternal(name, args, result, compilation_info=eci, **kwds) @@ -95,6 +110,9 @@ 'mpd_qset_ssize', [MPD_PTR, rffi.SSIZE_T, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qset_string = external( 'mpd_qset_string', [MPD_PTR, rffi.CCHARP, 
MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qimport_u32 = external( + 'mpd_qimport_u32', [MPD_PTR, rffi.UINTP, rffi.SIZE_T, + rffi.UCHAR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], rffi.SIZE_T) # Context operations mpd_getprec = external( @@ -122,6 +140,13 @@ mpd_maxcontext = external( 'mpd_maxcontext', [MPD_CONTEXT_PTR], lltype.Void) +mpd_free = external( + 'mpd_free', [rffi.VOIDP], lltype.Void, macro=True) + +# Conversion +mpd_to_sci_size = external( + 'mpd_to_sci_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) + # Operations mpd_qcmp = external( 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) From noreply at buildbot.pypy.org Sun May 11 00:27:42 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:42 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fixes Message-ID: <20140510222742.81B391C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71453:67994435c7e0 Date: 2014-05-05 22:39 +0200 http://bitbucket.org/pypy/pypy/changeset/67994435c7e0/ Log: Fixes diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -431,7 +431,7 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): - self._value = value + self._value = value.decode('ascii') self.setup(w_type) def _compute_value(self, space): diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -197,8 +197,8 @@ if status & (rmpdec.MPD_Inexact | rmpdec.MPD_Rounded | rmpdec.MPD_Clamped): - rmpdec.seterror(self.mpd, - rmpdec.MPD_Invalid_operation, status_ptr) + rmpdec.mpd_seterror( + self.mpd, rmpdec.MPD_Invalid_operation, self.status_ptr) status = rffi.cast(lltype.Signed, self.status_ptr[0]) lltype.free(self.status_ptr, flavor='raw') status &= rmpdec.MPD_Errors diff --git 
a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -1,5 +1,6 @@ from rpython.rlib import rmpdec from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import oefmt SIGNAL_MAP = unrolling_iterable([ ('InvalidOperation', rmpdec.MPD_IEEE_Invalid_operation), diff --git a/pypy/module/_decimal/test/test_ztranslation.py b/pypy/module/_decimal/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/test/test_ztranslation.py @@ -0,0 +1,6 @@ + +from pypy.objspace.fake.checkmodule import checkmodule + +def test_checkmodule(): + checkmodule('_decimal') + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -143,6 +143,9 @@ mpd_free = external( 'mpd_free', [rffi.VOIDP], lltype.Void, macro=True) +mpd_seterror = external( + 'mpd_seterror', [MPD_PTR, rffi.UINT, rffi.UINTP], lltype.Void) + # Conversion mpd_to_sci_size = external( 'mpd_to_sci_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) From noreply at buildbot.pypy.org Sun May 11 00:27:43 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:43 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Progress Message-ID: <20140510222743.B77321C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71454:0c0a9f75a3c7 Date: 2014-05-05 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/0c0a9f75a3c7/ Log: Progress diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -5,6 +5,7 @@ class Module(MixedModule): appleveldefs = { + 'localcontext': 'app_context.localcontext', } interpleveldefs = { diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- 
a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -16,17 +16,34 @@ # initialized to new SignalDicts. # Once a SignalDict is tied to a context, it cannot be deleted. class W_SignalDictMixin(W_Root): - pass + def __init__(self, flag_ptr): + self.flag_ptr = flag_ptr -def descr_new_signaldict(space, w_subtype): - w_result = space.allocate_instance(W_SignalDictMixin, w_subtype) - W_SignalDictMixin.__init__(w_result) - return w_result + def descr_getitem(self, space, w_key): + flag = interp_signals.exception_as_flag(space, w_key) + return space.wrap(bool(flag & self.flag_ptr[0])) + + def descr_setitem(self, space, w_key, w_value): + flag = interp_signals.exception_as_flag(space, w_key) + if space.is_true(w_value): + self.flag_ptr[0] |= flag + else: + self.flag_ptr[0] &= ~flag + + +def new_signal_dict(space, flag_ptr): + w_dict = space.allocate_instance(W_SignalDictMixin, + state_get(space).W_SignalDict) + W_SignalDictMixin.__init__(w_dict, flag_ptr) + return w_dict + W_SignalDictMixin.typedef = TypeDef( 'SignalDictMixin', - __new__ = interp2app(descr_new_signaldict), + __getitem__ = interp2app(W_SignalDictMixin.descr_getitem), + __setitem__ = interp2app(W_SignalDictMixin.descr_setitem), ) +W_SignalDictMixin.typedef.acceptable_as_base_class = True class State: @@ -54,8 +71,11 @@ self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, flavor='raw', zero=True, track_allocation=False) - self.w_flags = space.call_function(state_get(space).W_SignalDict) - self.capitals = 0 + self.w_flags = new_signal_dict( + space, lltype.direct_fieldptr(self.ctx, 'c_status')) + self.w_traps = new_signal_dict( + space, lltype.direct_fieldptr(self.ctx, 'c_traps')) + self.capitals = 1 def __del__(self): if self.ctx: @@ -142,6 +162,7 @@ 'Context', copy=interp2app(W_Context.copy_w), flags=interp_attrproperty_w('w_flags', W_Context), + traps=interp_attrproperty_w('w_traps', W_Context), prec=GetSetProperty(W_Context.get_prec, W_Context.set_prec), 
rounding=GetSetProperty(W_Context.get_rounding, W_Context.set_rounding), Emin=GetSetProperty(W_Context.get_emin, W_Context.set_emin), diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -5,6 +5,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, descr_set_dict, descr_del_dict) +from pypy.objspace.std import unicodeobject from pypy.module._decimal import interp_context @@ -80,7 +81,9 @@ def decimal_from_unicode(space, w_subtype, w_value, context, exact=True, strip_whitespace=True): - s = space.str_w(w_value) # XXX numeric_as_ascii() is different + s = unicodeobject.unicode_to_decimal_w(space, w_value) + if '\0' in s: + s = '' # empty string triggers ConversionSyntax. if strip_whitespace: s = s.strip() return decimal_from_cstring(space, w_subtype, s, context, exact=exact) diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -1,6 +1,6 @@ from rpython.rlib import rmpdec from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError SIGNAL_MAP = unrolling_iterable([ ('InvalidOperation', rmpdec.MPD_IEEE_Invalid_operation), @@ -31,9 +31,14 @@ if w_exc is None: raise oefmt(space.w_RuntimeError, "invalid error flag") - - - raise ValueError(hex(flags)) + return OperationError(w_exc, space.w_None) + +def exception_as_flag(space, w_exc): + for name, flag in SIGNAL_MAP: + if space.is_w(w_exc, getattr(get(space), 'w_' + name)): + return flag + raise oefmt(space.w_KeyError, + "invalid error flag") class SignalState: diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- 
a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -42,3 +42,51 @@ d = Decimal(i) assert str(d) == str(i) + def test_explicit_from_string(self): + Decimal = self.decimal.Decimal + InvalidOperation = self.decimal.InvalidOperation + localcontext = self.decimal.localcontext + + #empty + assert str(Decimal('')) == 'NaN' + + #int + assert str(Decimal('45')) == '45' + + #float + assert str(Decimal('45.34')) == '45.34' + + #engineer notation + assert str(Decimal('45e2')) == '4.5E+3' + + #just not a number + assert str(Decimal('ugly')) == 'NaN' + + #leading and trailing whitespace permitted + assert str(Decimal('1.3E4 \n')) == '1.3E+4' + assert str(Decimal(' -7.89')) == '-7.89' + assert str(Decimal(" 3.45679 ")) == '3.45679' + + # unicode whitespace + for lead in ["", ' ', '\u00a0', '\u205f']: + for trail in ["", ' ', '\u00a0', '\u205f']: + assert str(Decimal(lead + '9.311E+28' + trail)) == '9.311E+28' + + with localcontext() as c: + c.traps[InvalidOperation] = True + # Invalid string + raises(InvalidOperation, Decimal, "xyz") + # Two arguments max + raises(TypeError, Decimal, "1234", "x", "y") + + # space within the numeric part + raises(InvalidOperation, Decimal, "1\u00a02\u00a03") + raises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0") + + # unicode whitespace + raises(InvalidOperation, Decimal, "\u00a0") + raises(InvalidOperation, Decimal, "\u00a0\u00a0") + + # embedded NUL + raises(InvalidOperation, Decimal, "12\u00003") + From noreply at buildbot.pypy.org Sun May 11 00:27:45 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:45 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add tuple->Decimal conversion Message-ID: <20140510222745.015F11C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71455:c80a6ebb000b Date: 2014-05-06 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/c80a6ebb000b/ Log: Add tuple->Decimal conversion diff 
--git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,4 +1,5 @@ from rpython.rlib import rmpdec, rarithmetic, rbigint +from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt, OperationError @@ -118,6 +119,82 @@ raise ValueError("Bad rbigint size") return w_result +def decimal_from_tuple(space, w_subtype, w_value, context, exact=True): + w_sign, w_digits, w_exponent = space.unpackiterable(w_value, 3) + + # Make a string representation of a DecimalTuple + builder = StringBuilder(20) + + # sign + try: + sign = space.int_w(w_sign) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + sign = -1 + if sign != 0 and sign != 1: + raise oefmt(space.w_ValueError, + "sign must be an integer with the value 0 or 1") + builder.append('-' if sign else '+') + + # exponent or encoding for a special number + is_infinite = False + is_special = False + exponent = 0 + if space.isinstance_w(w_exponent, space.w_unicode): + # special + is_special = True + val = space.unicode_w(w_exponent) + if val == 'F': + builder.append('Inf') + is_infinite = True + elif val == 'n': + builder.append('Nan') + elif val == 'N': + builder.append('sNan') + else: + raise oefmt(space.w_ValueError, + "string argument in the third position " + "must be 'F', 'n' or 'N'") + else: + # exponent + try: + exponent = space.int_w(w_exponent) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + raise oefmt(space.w_ValueError, + "exponent must be an integer") + + # coefficient + digits_w = space.unpackiterable(w_digits) + + if not digits_w and not is_special: + # empty tuple: zero coefficient, except for special numbers + strval += '0' + for w_digit in digits_w: + try: + digit = space.int_w(w_digit) + 
except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + digit = -1 + if not 0 <= digit <= 9: + raise oefmt(space.w_ValueError, + "coefficient must be a tuple of digits") + if is_infinite: + # accept but ignore any well-formed coefficient for + # compatibility with decimal.py + continue + builder.append(chr(ord('0') + digit)) + + if not is_special: + builder.append('E') + builder.append(str(exponent)) + + strval = builder.build() + return decimal_from_cstring(space, w_subtype, strval, context, exact=exact) + def decimal_from_object(space, w_subtype, w_value, context, exact=True): if w_value is None: return decimal_from_ssize(space, w_subtype, 0, context, exact=exact) @@ -127,6 +204,10 @@ elif space.isinstance_w(w_value, space.w_int): return decimal_from_long(space, w_subtype, w_value, context, exact=exact) + elif (space.isinstance_w(w_value, space.w_list) or + space.isinstance_w(w_value, space.w_tuple)): + return decimal_from_tuple(space, w_subtype, w_value, context, + exact=exact) raise oefmt(space.w_TypeError, "conversion from %N to Decimal is not supported", space.type(w_value)) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -90,3 +90,61 @@ # embedded NUL raises(InvalidOperation, Decimal, "12\u00003") + def test_explicit_from_tuples(self): + Decimal = self.decimal.Decimal + + #zero + d = Decimal( (0, (0,), 0) ) + assert str(d) == '0' + + #int + d = Decimal( (1, (4, 5), 0) ) + assert str(d) == '-45' + + #float + d = Decimal( (0, (4, 5, 3, 4), -2) ) + assert str(d) == '45.34' + + #weird + d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) + assert str(d) == '-4.34913534E-17' + + #inf + d = Decimal( (0, (), "F") ) + assert str(d) == 'Infinity' + + #wrong number of items + raises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) ) + + #bad sign + raises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) ) 
+ raises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) ) + raises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2)) + + #bad exp + raises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') ) + raises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) ) + raises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') ) + + #bad coefficients + raises(ValueError, Decimal, (1, "xyz", 2) ) + raises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) ) + raises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) ) + raises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) ) + raises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) ) + + def test_explicit_from_list(self): + Decimal = self.decimal.Decimal + + d = Decimal([0, [0], 0]) + assert str(d) == '0' + + d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25]) + assert str(d) == '-4.34913534E-17' + + d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25]) + assert str(d) == '-4.34913534E-17' + + d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25)) + assert str(d) == '-4.34913534E-17' + From noreply at buildbot.pypy.org Sun May 11 00:27:46 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:46 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Decimal: More tests, more constructors. Message-ID: <20140510222746.62D471C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71456:8667f0db8c8c Date: 2014-05-07 00:30 +0200 http://bitbucket.org/pypy/pypy/changeset/8667f0db8c8c/ Log: Decimal: More tests, more constructors. 
diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -21,14 +21,16 @@ def descr_getitem(self, space, w_key): flag = interp_signals.exception_as_flag(space, w_key) - return space.wrap(bool(flag & self.flag_ptr[0])) + cur_flag = rffi.cast(lltype.Signed, self.flag_ptr[0]) + return space.wrap(bool(flag & cur_flag)) def descr_setitem(self, space, w_key, w_value): flag = interp_signals.exception_as_flag(space, w_key) + cur_flag = rffi.cast(lltype.Signed, self.flag_ptr[0]) if space.is_true(w_value): - self.flag_ptr[0] |= flag + self.flag_ptr[0] = rffi.cast(rffi.UINT, cur_flag | flag) else: - self.flag_ptr[0] &= ~flag + self.flag_ptr[0] = rffi.cast(rffi.UINT, cur_flag & ~flag) def new_signal_dict(space, flag_ptr): diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -35,6 +35,20 @@ if self.data: lltype.free(self.data, flavor='raw') + def apply(self, context, w_subtype=None): + # Apply the context to the input operand. Return a new W_Decimal. 
+ if subtype: + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + else: + w_result = W_Decimal(space) + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + rpmdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) + context.addstatus(self.space, status_ptr[0]) + rpmdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) + context.addstatus(self.space, status_ptr[0]) + return w_result + def descr_str(self, space): context = interp_context.getcontext(space) with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as cp_ptr: @@ -48,6 +62,9 @@ rmpdec.mpd_free(cp) return space.wrap(result) # Convert bytes to unicode + def descr_bool(self, space): + return space.wrap(not rmpdec.mpd_iszero(self.mpd)) + def compare(self, space, w_other, op): if not isinstance(w_other, W_Decimal): # So far return space.w_NotImplemented @@ -145,12 +162,12 @@ # special is_special = True val = space.unicode_w(w_exponent) - if val == 'F': + if val == u'F': builder.append('Inf') is_infinite = True - elif val == 'n': + elif val == u'n': builder.append('Nan') - elif val == 'N': + elif val == u'N': builder.append('sNan') else: raise oefmt(space.w_ValueError, @@ -171,7 +188,7 @@ if not digits_w and not is_special: # empty tuple: zero coefficient, except for special numbers - strval += '0' + builder.append('0') for w_digit in digits_w: try: digit = space.int_w(w_digit) @@ -195,9 +212,35 @@ strval = builder.build() return decimal_from_cstring(space, w_subtype, strval, context, exact=exact) +def decimal_from_decimal(space, w_subtype, w_value, context, exact=True): + assert isinstance(w_value, W_Decimal) + if exact: + if space.is_w(w_subtype, space.gettypeobject(W_Decimal.typedef)): + return w_value + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + with interp_context.ConvContext( + space, w_result.mpd, context, exact) as (ctx, status_ptr): + rmpdec.mpd_qcopy(w_result.mpd, w_value.mpd, status_ptr) + 
return w_result + else: + if (rmpdec.mpd_isnan(w_value.mpd) and + w_value.mpd.digits > (context.ctx.prec - context.ctx.clamp)): + # Special case: too many NaN payload digits + context.addstatus(space, rmpdec.MPD_Conversion_syntax) + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + rmpdec.mpd_setspecial(w_result.mpd, rmpdec.MPD_POS, rmpdec.MPD_NAN) + else: + return w_value.apply(context) + + def decimal_from_object(space, w_subtype, w_value, context, exact=True): if w_value is None: return decimal_from_ssize(space, w_subtype, 0, context, exact=exact) + elif isinstance(w_value, W_Decimal): + return decimal_from_decimal(space, w_subtype, w_value, context, + exact=exact) elif space.isinstance_w(w_value, space.w_unicode): return decimal_from_unicode(space, w_subtype, w_value, context, exact=exact, strip_whitespace=exact) @@ -222,5 +265,6 @@ 'Decimal', __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), + __bool__ = interp2app(W_Decimal.descr_bool), __eq__ = interp2app(W_Decimal.descr_eq), ) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -148,3 +148,34 @@ d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25)) assert str(d) == '-4.34913534E-17' + def test_explicit_from_bool(self): + Decimal = self.decimal.Decimal + + assert bool(Decimal(0)) is False + assert bool(Decimal(1)) is True + assert Decimal(False) == Decimal(0) + assert Decimal(True) == Decimal(1) + + def test_explicit_from_Decimal(self): + Decimal = self.decimal.Decimal + + #positive + d = Decimal(45) + e = Decimal(d) + assert str(e) == '45' + + #very large positive + d = Decimal(500000123) + e = Decimal(d) + assert str(e) == '500000123' + + #negative + d = Decimal(-45) + e = Decimal(d) + assert str(e) == '-45' + + #zero + d = Decimal(0) + e = Decimal(d) + assert str(e) == '0' + diff --git 
a/pypy/module/_decimal/test/test_ztranslation.py b/pypy/module/_decimal/test/test_ztranslation.py --- a/pypy/module/_decimal/test/test_ztranslation.py +++ b/pypy/module/_decimal/test/test_ztranslation.py @@ -1,6 +1,9 @@ from pypy.objspace.fake.checkmodule import checkmodule +from pypy.module._decimal import Module def test_checkmodule(): + Module.interpleveldefs['__hack'] = ( + 'interp_decimal.unicodeobject.W_UnicodeObject(u"")') checkmodule('_decimal') diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -36,11 +36,12 @@ libdir.join('memory.c'), ], export_symbols=[ - "mpd_qset_ssize", "mpd_qset_string", + "mpd_qset_ssize", "mpd_qset_string", "mpd_qcopy", "mpd_setspecial", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", "mpd_to_sci_size", + "mpd_iszero", "mpd_isnan", "mpd_qcmp", ], compile_extra=compile_extra, @@ -71,6 +72,7 @@ # Flags MPD_POS = platform.ConstantInteger('MPD_POS') MPD_NEG = platform.ConstantInteger('MPD_NEG') + MPD_NAN = platform.ConstantInteger('MPD_NAN') MPD_STATIC = platform.ConstantInteger('MPD_STATIC') MPD_STATIC_DATA = platform.ConstantInteger('MPD_STATIC_DATA') @@ -113,6 +115,10 @@ mpd_qimport_u32 = external( 'mpd_qimport_u32', [MPD_PTR, rffi.UINTP, rffi.SIZE_T, rffi.UCHAR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], rffi.SIZE_T) +mpd_qcopy = external( + 'mpd_qcopy', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) +mpd_setspecial = external( + 'mpd_setspecial', [MPD_PTR, rffi.UCHAR, rffi.UCHAR], lltype.Void) # Context operations mpd_getprec = external( @@ -151,5 +157,9 @@ 'mpd_to_sci_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) # Operations +mpd_iszero = external( + 'mpd_iszero', [MPD_PTR], rffi.INT) +mpd_isnan = external( + 'mpd_isnan', [MPD_PTR], rffi.INT) mpd_qcmp = external( 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) From noreply 
at buildbot.pypy.org Sun May 11 00:27:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:47 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: One more test, progress! Message-ID: <20140510222747.962A51C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71457:2277990d7939 Date: 2014-05-09 23:12 +0200 http://bitbucket.org/pypy/pypy/changeset/2277990d7939/ Log: One more test, progress! diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,4 +1,4 @@ -from rpython.rlib import rmpdec, rarithmetic, rbigint +from rpython.rlib import rmpdec, rarithmetic, rbigint, rfloat from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root @@ -20,7 +20,8 @@ def __init__(self, space): self.mpd = lltype.malloc(rmpdec.MPD_PTR.TO, flavor='raw') - self.data = lltype.malloc(rffi.UINTP.TO, DEC_MINALLOC, flavor='raw') + self.data = lltype.malloc(rmpdec.MPD_UINT_PTR.TO, + DEC_MINALLOC, flavor='raw') rffi.setintfield(self.mpd, 'c_flags', rmpdec.MPD_STATIC | rmpdec.MPD_STATIC_DATA) self.mpd.c_exp = 0 @@ -43,9 +44,9 @@ else: w_result = W_Decimal(space) with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: - rpmdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) + rmpdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) context.addstatus(self.space, status_ptr[0]) - rpmdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) + rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) context.addstatus(self.space, status_ptr[0]) return w_result @@ -65,6 +66,19 @@ def descr_bool(self, space): return space.wrap(not rmpdec.mpd_iszero(self.mpd)) + def descr_float(self, space): + if rmpdec.mpd_isnan(self.mpd): + if rmpdec.mpd_issnan(self.mpd): + raise oefmt(space.w_ValueError, + 
"cannot convert signaling NaN to float") + if rmpdec.mpd_isnegative(self.mpd): + w_s = space.wrap("-nan") + else: + w_s = space.wrap("nan") + else: + w_s = self.descr_str(space) + return space.call_function(space.w_float, w_s) + def compare(self, space, w_other, op): if not isinstance(w_other, W_Decimal): # So far return space.w_NotImplemented @@ -78,6 +92,12 @@ def descr_eq(self, space, w_other): return self.compare(space, w_other, 'eq') + # Boolean functions + def is_qnan_w(self, space): + return space.wrap(bool(rmpdec.mpd_isqnan(self.mpd))) + def is_infinite_w(self, space): + return space.wrap(bool(rmpdec.mpd_isinfinite(self.mpd))) + # Constructors def decimal_from_ssize(space, w_subtype, value, context, exact=True): @@ -106,12 +126,10 @@ s = s.strip() return decimal_from_cstring(space, w_subtype, s, context, exact=exact) -def decimal_from_long(space, w_subtype, w_value, context, exact=True): +def decimal_from_bigint(space, w_subtype, value, context, exact=True): w_result = space.allocate_instance(W_Decimal, w_subtype) W_Decimal.__init__(w_result, space) - value = space.bigint_w(w_value) - with interp_context.ConvContext( space, w_result.mpd, context, exact) as (ctx, status_ptr): if value.sign == -1: @@ -233,7 +251,55 @@ rmpdec.mpd_setspecial(w_result.mpd, rmpdec.MPD_POS, rmpdec.MPD_NAN) else: return w_value.apply(context) - + +def decimal_from_float(space, w_subtype, w_value, context, exact=True): + value = space.float_w(w_value) + sign = 0 if rfloat.copysign(1.0, value) == 1.0 else 1 + + if rfloat.isnan(value): + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + # decimal.py calls repr(float(+-nan)), which always gives a + # positive result. 
+ rmpdec.mpd_setspecial(w_result.mpd, rmpdec.MPD_POS, rmpdec.MPD_NAN) + return w_result + if rfloat.isinf(value): + w_result = space.allocate_instance(W_Decimal, w_subtype) + W_Decimal.__init__(w_result, space) + rmpdec.mpd_setspecial(w_result.mpd, sign, rmpdec.MPD_INF) + return w_result + + # float as integer ratio: numerator/denominator + num, den = rfloat.float_as_rbigint_ratio(abs(value)) + k = den.bit_length() - 1 + + w_result = decimal_from_bigint(space, w_subtype, num, context, exact=True) + + # Compute num * 5**k + d1 = rmpdec.mpd_qnew() + if not d1: + raise OperationError(space.w_MemoryError, space.w_None) + d2 = rmpdec.mpd_qnew() + if not d2: + raise OperationError(space.w_MemoryError, space.w_None) + with interp_context.ConvContext( + space, w_result.mpd, context, exact=True) as (ctx, status_ptr): + rmpdec.mpd_qset_uint(d1, 5, ctx, status_ptr) + rmpdec.mpd_qset_ssize(d2, k, ctx, status_ptr) + rmpdec.mpd_qpow(d1, d1, d2, ctx, status_ptr) + with interp_context.ConvContext( + space, w_result.mpd, context, exact=True) as (ctx, status_ptr): + rmpdec.mpd_qmul(w_result.mpd, w_result.mpd, d1, ctx, status_ptr) + + # result = +- n * 5**k * 10**-k + rmpdec.mpd_set_sign(w_result.mpd, sign) + w_result.mpd.c_exp = - k + + if not exact: + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) + context.addstatus(space, status_ptr[0]) + return w_result def decimal_from_object(space, w_subtype, w_value, context, exact=True): if w_value is None: @@ -245,12 +311,17 @@ return decimal_from_unicode(space, w_subtype, w_value, context, exact=exact, strip_whitespace=exact) elif space.isinstance_w(w_value, space.w_int): - return decimal_from_long(space, w_subtype, w_value, context, - exact=exact) + value = space.bigint_w(w_value) + return decimal_from_bigint(space, w_subtype, value, context, + exact=exact) elif (space.isinstance_w(w_value, space.w_list) or space.isinstance_w(w_value, space.w_tuple)): 
return decimal_from_tuple(space, w_subtype, w_value, context, exact=exact) + elif space.isinstance_w(w_value, space.w_float): + context.addstatus(space, rmpdec.MPD_Float_operation) + return decimal_from_float(space, w_subtype, w_value, context, + exact=exact) raise oefmt(space.w_TypeError, "conversion from %N to Decimal is not supported", space.type(w_value)) @@ -266,5 +337,8 @@ __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), __bool__ = interp2app(W_Decimal.descr_bool), + __float__ = interp2app(W_Decimal.descr_float), __eq__ = interp2app(W_Decimal.descr_eq), + is_qnan = interp2app(W_Decimal.is_qnan_w), + is_infinite = interp2app(W_Decimal.is_infinite_w), ) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -1,11 +1,18 @@ +from pypy.interpreter import gateway +import random + class AppTestExplicitConstruction: - spaceconfig = dict(usemodules=('_decimal',)) + spaceconfig = dict(usemodules=('_decimal', '_random')) def setup_class(cls): space = cls.space cls.w_decimal = space.call_function(space.builtin.get('__import__'), space.wrap("_decimal")) cls.w_Decimal = space.getattr(cls.w_decimal, space.wrap("Decimal")) + def random_float(space): + f = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) + return space.wrap(f) + cls.w_random_float = space.wrap(gateway.interp2app(random_float)) def test_explicit_empty(self): Decimal = self.Decimal @@ -179,3 +186,20 @@ e = Decimal(d) assert str(e) == '0' + def test_explicit_from_float(self): + Decimal = self.decimal.Decimal + + r = Decimal(0.1) + assert type(r) is Decimal + assert str(r) == ( + '0.1000000000000000055511151231257827021181583404541015625') + assert Decimal(float('nan')).is_qnan() + assert Decimal(float('inf')).is_infinite() + assert Decimal(float('-inf')).is_infinite() + assert str(Decimal(float('nan'))) == str(Decimal('NaN')) + 
assert str(Decimal(float('inf'))) == str(Decimal('Infinity')) + assert str(Decimal(float('-inf'))) == str(Decimal('-Infinity')) + assert str(Decimal(float('-0.0'))) == str(Decimal('-0')) + for i in range(200): + x = self.random_float() + assert x == float(Decimal(x)) # roundtrip diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -12,7 +12,7 @@ if sys.maxsize > 1<<32: compile_extra.append("-DCONFIG_64") # This suppose a x64 platform with gcc inline assembler. - compile_extra.append("-DASM") + compile_extra.append("-DANSI") else: compile_extra.append("-DCONFIG_32") compile_extra.append("-DANSI") @@ -36,13 +36,17 @@ libdir.join('memory.c'), ], export_symbols=[ - "mpd_qset_ssize", "mpd_qset_string", "mpd_qcopy", "mpd_setspecial", + "mpd_qset_ssize", "mpd_qset_uint", "mpd_qset_string", "mpd_qcopy", "mpd_setspecial", + "mpd_set_sign", "mpd_qfinalize", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", + "mpd_qnew", "mpd_to_sci_size", - "mpd_iszero", "mpd_isnan", + "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", + "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", + "mpd_qpow", "mpd_qmul", ], compile_extra=compile_extra, libraries=['m'], @@ -64,6 +68,13 @@ class CConfig: _compilation_info_ = eci + MPD_UINT_T = platform.SimpleType('mpd_uint_t', rffi.INT) + +MPD_UINT_T = platform.configure(CConfig)['MPD_UINT_T'] +MPD_UINT_PTR = rffi.CArrayPtr(MPD_UINT_T) + +class CConfig: + _compilation_info_ = eci MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') @@ -73,6 +84,7 @@ MPD_POS = platform.ConstantInteger('MPD_POS') MPD_NEG = platform.ConstantInteger('MPD_NEG') MPD_NAN = platform.ConstantInteger('MPD_NAN') + MPD_INF = platform.ConstantInteger('MPD_INF') MPD_STATIC = platform.ConstantInteger('MPD_STATIC') MPD_STATIC_DATA = 
platform.ConstantInteger('MPD_STATIC_DATA') @@ -84,12 +96,12 @@ locals()[name] = platform.ConstantInteger(name) MPD_T = platform.Struct('mpd_t', - [('flags', rffi.UINT), + [('flags', rffi.UCHAR), ('exp', rffi.SSIZE_T), ('digits', rffi.SSIZE_T), ('len', rffi.SSIZE_T), ('alloc', rffi.SSIZE_T), - ('data', rffi.UINTP), + ('data', MPD_UINT_PTR), ]) MPD_CONTEXT_T = platform.Struct('mpd_context_t', [('traps', rffi.UINT), @@ -110,6 +122,8 @@ # Initialization mpd_qset_ssize = external( 'mpd_qset_ssize', [MPD_PTR, rffi.SSIZE_T, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qset_uint = external( + 'mpd_qset_uint', [MPD_PTR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qset_string = external( 'mpd_qset_string', [MPD_PTR, rffi.CCHARP, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qimport_u32 = external( @@ -119,6 +133,10 @@ 'mpd_qcopy', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) mpd_setspecial = external( 'mpd_setspecial', [MPD_PTR, rffi.UCHAR, rffi.UCHAR], lltype.Void) +mpd_set_sign = external( + 'mpd_set_sign', [MPD_PTR, rffi.UCHAR], lltype.Void) +mpd_qfinalize = external( + 'mpd_qfinalize', [MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) # Context operations mpd_getprec = external( @@ -146,6 +164,8 @@ mpd_maxcontext = external( 'mpd_maxcontext', [MPD_CONTEXT_PTR], lltype.Void) +mpd_qnew = external( + 'mpd_qnew', [], MPD_PTR) mpd_free = external( 'mpd_free', [rffi.VOIDP], lltype.Void, macro=True) @@ -159,7 +179,24 @@ # Operations mpd_iszero = external( 'mpd_iszero', [MPD_PTR], rffi.INT) +mpd_isnegative = external( + 'mpd_isnegative', [MPD_PTR], rffi.INT) +mpd_isinfinite = external( + 'mpd_isinfinite', [MPD_PTR], rffi.INT) mpd_isnan = external( 'mpd_isnan', [MPD_PTR], rffi.INT) +mpd_issnan = external( + 'mpd_issnan', [MPD_PTR], rffi.INT) +mpd_isqnan = external( + 'mpd_isqnan', [MPD_PTR], rffi.INT) mpd_qcmp = external( 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) + +mpd_qpow = external( + 'mpd_qpow', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, 
rffi.UINTP], + lltype.Void) +mpd_qmul = external( + 'mpd_qmul', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) From noreply at buildbot.pypy.org Sun May 11 00:27:48 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:48 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Lots of progress in context.create_decimal(). Message-ID: <20140510222748.C64921C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71458:f8e3661a9e69 Date: 2014-05-10 12:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f8e3661a9e69/ Log: Lots of progress in context.create_decimal(). diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -67,12 +67,28 @@ ROUND_CONSTANTS = unrolling_iterable([ (name, getattr(rmpdec, 'MPD_' + name)) for name in rmpdec.ROUND_CONSTANTS]) +DEC_DFLT_EMAX = 999999 +DEC_DFLT_EMIN = -999999 class W_Context(W_Root): def __init__(self, space): self.ctx = lltype.malloc(rmpdec.MPD_CONTEXT_PTR.TO, flavor='raw', zero=True, track_allocation=False) + # Default context + self.ctx.c_prec = 28 + self.ctx.c_emax = DEC_DFLT_EMAX + self.ctx.c_emin = DEC_DFLT_EMIN + rffi.setintfield(self.ctx, 'c_traps', + (rmpdec.MPD_IEEE_Invalid_operation| + rmpdec.MPD_Division_by_zero| + rmpdec.MPD_Overflow)) + rffi.setintfield(self.ctx, 'c_status', 0) + rffi.setintfield(self.ctx, 'c_newtrap', 0) + rffi.setintfield(self.ctx, 'c_round', rmpdec.MPD_ROUND_HALF_EVEN) + rffi.setintfield(self.ctx, 'c_clamp', 0) + rffi.setintfield(self.ctx, 'c_allcr', 1) + self.w_flags = new_signal_dict( space, lltype.direct_fieldptr(self.ctx, 'c_status')) self.w_traps = new_signal_dict( @@ -97,7 +113,8 @@ def copy_w(self, space): w_copy = W_Context(space) - # XXX incomplete + rffi.structcopy(w_copy.ctx, self.ctx) + w_copy.capitals = self.capitals return w_copy def get_prec(self, space): 
@@ -154,6 +171,11 @@ raise oefmt(space.w_ValueError, "valid values for clamp are 0 or 1") + def create_decimal_w(self, space, w_value=None): + from pypy.module._decimal import interp_decimal + return interp_decimal.decimal_from_object( + space, None, w_value, self, exact=False) + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) @@ -162,7 +184,8 @@ W_Context.typedef = TypeDef( 'Context', - copy=interp2app(W_Context.copy_w), + __new__ = interp2app(descr_new_context), + # Attributes flags=interp_attrproperty_w('w_flags', W_Context), traps=interp_attrproperty_w('w_traps', W_Context), prec=GetSetProperty(W_Context.get_prec, W_Context.set_prec), @@ -170,7 +193,9 @@ Emin=GetSetProperty(W_Context.get_emin, W_Context.set_emin), Emax=GetSetProperty(W_Context.get_emax, W_Context.set_emax), clamp=GetSetProperty(W_Context.get_clamp, W_Context.set_clamp), - __new__ = interp2app(descr_new_context), + # + copy=interp2app(W_Context.copy_w), + create_decimal=interp2app(W_Context.create_decimal_w), ) @@ -224,6 +249,7 @@ self.mpd, rmpdec.MPD_Invalid_operation, self.status_ptr) status = rffi.cast(lltype.Signed, self.status_ptr[0]) lltype.free(self.status_ptr, flavor='raw') - status &= rmpdec.MPD_Errors + if self.exact: + status &= rmpdec.MPD_Errors # May raise a DecimalException self.context.addstatus(self.space, status) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -36,18 +36,23 @@ if self.data: lltype.free(self.data, flavor='raw') - def apply(self, context, w_subtype=None): - # Apply the context to the input operand. Return a new W_Decimal. 
- if subtype: + @staticmethod + def allocate(space, w_subtype=None): + if w_subtype: w_result = space.allocate_instance(W_Decimal, w_subtype) W_Decimal.__init__(w_result, space) else: w_result = W_Decimal(space) + return w_result + + def apply(self, space, context, w_subtype=None): + # Apply the context to the input operand. Return a new W_Decimal. + w_result = W_Decimal.allocate(space, w_subtype) with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: rmpdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) - context.addstatus(self.space, status_ptr[0]) + context.addstatus(space, status_ptr[0]) rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) - context.addstatus(self.space, status_ptr[0]) + context.addstatus(space, status_ptr[0]) return w_result def descr_str(self, space): @@ -101,17 +106,14 @@ # Constructors def decimal_from_ssize(space, w_subtype, value, context, exact=True): - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) with interp_context.ConvContext( space, w_result.mpd, context, exact) as (ctx, status_ptr): rmpdec.mpd_qset_ssize(w_result.mpd, value, ctx, status_ptr) return w_result def decimal_from_cstring(space, w_subtype, value, context, exact=True): - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) - + w_result = W_Decimal.allocate(space, w_subtype) with interp_context.ConvContext( space, w_result.mpd, context, exact) as (ctx, status_ptr): rmpdec.mpd_qset_string(w_result.mpd, value, ctx, status_ptr) @@ -127,8 +129,7 @@ return decimal_from_cstring(space, w_subtype, s, context, exact=exact) def decimal_from_bigint(space, w_subtype, value, context, exact=True): - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) with interp_context.ConvContext( space, w_result.mpd, context, exact) as (ctx, 
status_ptr): @@ -235,37 +236,34 @@ if exact: if space.is_w(w_subtype, space.gettypeobject(W_Decimal.typedef)): return w_value - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) with interp_context.ConvContext( space, w_result.mpd, context, exact) as (ctx, status_ptr): rmpdec.mpd_qcopy(w_result.mpd, w_value.mpd, status_ptr) return w_result else: if (rmpdec.mpd_isnan(w_value.mpd) and - w_value.mpd.digits > (context.ctx.prec - context.ctx.clamp)): + w_value.mpd.c_digits > (context.ctx.c_prec - context.ctx.c_clamp)): # Special case: too many NaN payload digits context.addstatus(space, rmpdec.MPD_Conversion_syntax) - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) rmpdec.mpd_setspecial(w_result.mpd, rmpdec.MPD_POS, rmpdec.MPD_NAN) + return w_result else: - return w_value.apply(context) + return w_value.apply(space, context) def decimal_from_float(space, w_subtype, w_value, context, exact=True): value = space.float_w(w_value) sign = 0 if rfloat.copysign(1.0, value) == 1.0 else 1 if rfloat.isnan(value): - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) # decimal.py calls repr(float(+-nan)), which always gives a # positive result. 
rmpdec.mpd_setspecial(w_result.mpd, rmpdec.MPD_POS, rmpdec.MPD_NAN) return w_result if rfloat.isinf(value): - w_result = space.allocate_instance(W_Decimal, w_subtype) - W_Decimal.__init__(w_result, space) + w_result = W_Decimal.allocate(space, w_subtype) rmpdec.mpd_setspecial(w_result.mpd, sign, rmpdec.MPD_INF) return w_result diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -203,3 +203,91 @@ for i in range(200): x = self.random_float() assert x == float(Decimal(x)) # roundtrip + + def test_explicit_context_create_decimal(self): + Decimal = self.decimal.Decimal + InvalidOperation = self.decimal.InvalidOperation + Rounded = self.decimal.Rounded + + nc = self.decimal.getcontext().copy() + nc.prec = 3 + nc.traps[InvalidOperation] = False + nc.traps[self.decimal.Overflow] = False + nc.traps[self.decimal.DivisionByZero] = False + + # empty + d = Decimal() + assert str(d) == '0' + d = nc.create_decimal() + assert str(d) == '0' + + # from None + raises(TypeError, nc.create_decimal, None) + + # from int + d = nc.create_decimal(456) + assert isinstance(d, Decimal) + assert nc.create_decimal(45678) == nc.create_decimal('457E+2') + + # from string + d = Decimal('456789') + assert str(d) == '456789' + d = nc.create_decimal('456789') + assert str(d) == '4.57E+5' + # leading and trailing whitespace should result in a NaN; + # spaces are already checked in Cowlishaw's test-suite, so + # here we just check that a trailing newline results in a NaN + assert str(nc.create_decimal('3.14\n')) == 'NaN' + + # from tuples + d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) + assert str(d) == '-4.34913534E-17' + d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) ) + assert str(d) == '-4.35E-17' + + # from Decimal + prevdec = Decimal(500000123) + d = Decimal(prevdec) + assert str(d) == '500000123' + d = nc.create_decimal(prevdec) + 
assert str(d) == '5.00E+8' + + # more integers + nc.prec = 28 + nc.traps[InvalidOperation] = True + + for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0, + 2**31-1, 2**31, 2**63-1, 2**63]: + d = nc.create_decimal(v) + assert isinstance(d, Decimal) + assert str(d) == str(v) + + nc.prec = 3 + nc.traps[Rounded] = True + raises(Rounded, nc.create_decimal, 1234) + + # from string + nc.prec = 28 + assert str(nc.create_decimal('0E-017')) == '0E-17' + assert str(nc.create_decimal('45')) == '45' + assert str(nc.create_decimal('-Inf')) == '-Infinity' + assert str(nc.create_decimal('NaN123')) == 'NaN123' + + # invalid arguments + raises(InvalidOperation, nc.create_decimal, "xyz") + raises(ValueError, nc.create_decimal, (1, "xyz", -25)) + raises(TypeError, nc.create_decimal, "1234", "5678") + + # too many NaN payload digits + nc.prec = 3 + raises(InvalidOperation, nc.create_decimal, 'NaN12345') + raises(InvalidOperation, nc.create_decimal, Decimal('NaN12345')) + + nc.traps[InvalidOperation] = False + assert str(nc.create_decimal('NaN12345')) == 'NaN' + assert nc.flags[InvalidOperation] + + nc.flags[InvalidOperation] = False + assert str(nc.create_decimal(Decimal('NaN12345'))) == 'NaN' + assert nc.flags[InvalidOperation] + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -43,10 +43,11 @@ "mpd_maxcontext", "mpd_qnew", "mpd_to_sci_size", - "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", + "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", "mpd_qpow", "mpd_qmul", + "mpd_qround_to_int", ], compile_extra=compile_extra, libraries=['m'], @@ -104,8 +105,15 @@ ('data', MPD_UINT_PTR), ]) MPD_CONTEXT_T = platform.Struct('mpd_context_t', - [('traps', rffi.UINT), + [('prec', lltype.Signed), + ('emax', lltype.Signed), + ('emin', lltype.Signed), + ('traps', rffi.UINT), ('status', rffi.UINT), + ('newtrap', rffi.UINT), + ('round', lltype.Signed), + 
('clamp', lltype.Signed), + ('allcr', lltype.Signed), ]) @@ -183,6 +191,8 @@ 'mpd_isnegative', [MPD_PTR], rffi.INT) mpd_isinfinite = external( 'mpd_isinfinite', [MPD_PTR], rffi.INT) +mpd_isspecial = external( + 'mpd_isspecial', [MPD_PTR], rffi.INT) mpd_isnan = external( 'mpd_isnan', [MPD_PTR], rffi.INT) mpd_issnan = external( @@ -200,3 +210,7 @@ 'mpd_qmul', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) + +mpd_qround_to_int = external( + 'mpd_qround_to_int', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) From noreply at buildbot.pypy.org Sun May 11 00:27:50 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:50 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add passing test Message-ID: <20140510222750.0B2761C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71459:a24293c9d03b Date: 2014-05-10 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/a24293c9d03b/ Log: Add passing test diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -291,3 +291,26 @@ assert str(nc.create_decimal(Decimal('NaN12345'))) == 'NaN' assert nc.flags[InvalidOperation] + def test_explicit_context_create_from_float(self): + Decimal = self.decimal.Decimal + + nc = self.decimal.Context() + r = nc.create_decimal(0.1) + assert assertEqual(type(r)) is Decimal + assert str(r) == '0.1000000000000000055511151231' + assert nc.create_decimal(float('nan')).is_qnan() + assert nc.create_decimal(float('inf')).is_infinite() + assert nc.create_decimal(float('-inf')).is_infinite() + assert (str(nc.create_decimal(float('nan'))) == + str(nc.create_decimal('NaN'))) + assert (str(nc.create_decimal(float('inf'))) == + str(nc.create_decimal('Infinity'))) + assert (str(nc.create_decimal(float('-inf'))) == + str(nc.create_decimal('-Infinity'))) + 
assert (str(nc.create_decimal(float('-0.0'))) == + str(nc.create_decimal('-0'))) + nc.prec = 100 + for i in range(200): + x = self.random_float() + assert x == float(nc.create_decimal(x)) # roundtrip + From noreply at buildbot.pypy.org Sun May 11 00:27:51 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:51 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix translation Message-ID: <20140510222751.522741C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71460:e26eb44b77a7 Date: 2014-05-10 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/e26eb44b77a7/ Log: Fix translation diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -50,9 +50,9 @@ w_result = W_Decimal.allocate(space, w_subtype) with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: rmpdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) - context.addstatus(space, status_ptr[0]) + context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) - context.addstatus(space, status_ptr[0]) + context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) return w_result def descr_str(self, space): @@ -243,7 +243,9 @@ return w_result else: if (rmpdec.mpd_isnan(w_value.mpd) and - w_value.mpd.c_digits > (context.ctx.c_prec - context.ctx.c_clamp)): + w_value.mpd.c_digits > ( + context.ctx.c_prec - rffi.cast(lltype.Signed, + context.ctx.c_clamp))): # Special case: too many NaN payload digits context.addstatus(space, rmpdec.MPD_Conversion_syntax) w_result = W_Decimal.allocate(space, w_subtype) @@ -296,7 +298,7 @@ if not exact: with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) - context.addstatus(space, status_ptr[0]) + 
context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) return w_result def decimal_from_object(space, w_subtype, w_value, context, exact=True): From noreply at buildbot.pypy.org Sun May 11 00:27:52 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:52 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: add decimal.Context.__repr__ Message-ID: <20140510222752.847981C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71461:e6fb259b2e7e Date: 2014-05-10 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e6fb259b2e7e/ Log: add decimal.Context.__repr__ diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -11,6 +11,7 @@ interpleveldefs = { 'Decimal': 'interp_decimal.W_Decimal', 'Context': 'interp_context.W_Context', + 'DefaultContext': 'interp_context.W_Context(space)', 'getcontext': 'interp_context.getcontext', 'setcontext': 'interp_context.setcontext', 'DecimalException': 'interp_signals.get(space).w_DecimalException', diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -162,6 +162,12 @@ raise oefmt(space.w_ValueError, "valid range for Emax is [0, MAX_EMAX]") + def get_capitals(self, space): + return space.wrap(self.capitals) + + def set_capitals(self, space, w_value): + self.capitals = space.int_w(w_value) + def get_clamp(self, space): return space.wrap(rmpdec.mpd_getclamp(self.ctx)) @@ -176,6 +182,25 @@ return interp_decimal.decimal_from_object( space, None, w_value, self, exact=False) + def descr_repr(self, space): + # Rounding string. 
+ rounding = rffi.cast(lltype.Signed, self.ctx.c_round) + for name, value in ROUND_CONSTANTS: + if value == rounding: + round_string = name + break + else: + raise oefmt(space.w_RuntimeError, + "bad rounding value") + flags = interp_signals.flags_as_string(self.ctx.c_status) + traps = interp_signals.flags_as_string(self.ctx.c_traps) + return space.wrap("Context(prec=%s, rounding=%s, Emin=%s, Emax=%s, " + "capitals=%s, clamp=%s, flags=%s, traps=%s)" % ( + self.ctx.c_prec, round_string, + self.ctx.c_emin, self.ctx.c_emax, + self.capitals, rffi.cast(lltype.Signed, self.ctx.c_clamp), + flags, traps)) + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) @@ -190,10 +215,13 @@ traps=interp_attrproperty_w('w_traps', W_Context), prec=GetSetProperty(W_Context.get_prec, W_Context.set_prec), rounding=GetSetProperty(W_Context.get_rounding, W_Context.set_rounding), + capitals=GetSetProperty(W_Context.get_capitals, W_Context.set_capitals), Emin=GetSetProperty(W_Context.get_emin, W_Context.set_emin), Emax=GetSetProperty(W_Context.get_emax, W_Context.set_emax), clamp=GetSetProperty(W_Context.get_clamp, W_Context.set_clamp), # + __repr__ = interp2app(W_Context.descr_repr), + # copy=interp2app(W_Context.copy_w), create_decimal=interp2app(W_Context.create_decimal_w), ) diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -1,4 +1,6 @@ -from rpython.rlib import rmpdec +from collections import OrderedDict +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rmpdec, rstring from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import oefmt, OperationError @@ -22,6 +24,18 @@ ('InvalidContext', rmpdec.MPD_Invalid_context), ]) +SIGNAL_STRINGS = OrderedDict([ + (rmpdec.MPD_Clamped, "Clamped"), + (rmpdec.MPD_IEEE_Invalid_operation, "InvalidOperation"), + 
(rmpdec.MPD_Division_by_zero, "DivisionByZero"), + (rmpdec.MPD_Inexact, "Inexact"), + (rmpdec.MPD_Float_operation, "FloatOperation"), + (rmpdec.MPD_Overflow, "Overflow"), + (rmpdec.MPD_Rounded, "Rounded"), + (rmpdec.MPD_Subnormal, "Subnormal"), + (rmpdec.MPD_Underflow, "Underflow"), + ]) + def flags_as_exception(space, flags): w_exc = None err_list = [] @@ -40,6 +54,20 @@ raise oefmt(space.w_KeyError, "invalid error flag") +def flags_as_string(flags): + builder = rstring.StringBuilder(30) + builder.append('[') + first = True + flags = rffi.cast(lltype.Signed, flags) + for (flag, value) in SIGNAL_STRINGS.items(): + if flag & flags: + if not first: + builder.append(', ') + first = False + builder.append(value) + builder.append(']') + return builder.build() + class SignalState: def __init__(self, space): diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/test/test_context.py @@ -0,0 +1,33 @@ +class AppTestContext: + spaceconfig = dict(usemodules=('_decimal',)) + + def setup_class(cls): + space = cls.space + cls.w_decimal = space.call_function(space.builtin.get('__import__'), + space.wrap("_decimal")) + cls.w_Decimal = space.getattr(cls.w_decimal, space.wrap("Decimal")) + + def test_context_repr(self): + c = self.decimal.DefaultContext.copy() + + c.prec = 425000000 + c.Emax = 425000000 + c.Emin = -425000000 + c.rounding = self.decimal.ROUND_HALF_DOWN + c.capitals = 0 + c.clamp = 1 + + d = self.decimal + OrderedSignals = [d.Clamped, d.Rounded, d.Inexact, d.Subnormal, + d.Underflow, d.Overflow, d.DivisionByZero, + d.InvalidOperation, d.FloatOperation] + for sig in OrderedSignals: + c.flags[sig] = False + c.traps[sig] = False + + s = c.__repr__() + t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \ + "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \ + "flags=[], traps=[])" + assert s == t + diff --git a/pypy/module/_decimal/test/test_decimal.py 
b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -2,7 +2,7 @@ import random class AppTestExplicitConstruction: - spaceconfig = dict(usemodules=('_decimal', '_random')) + spaceconfig = dict(usemodules=('_decimal',)) def setup_class(cls): space = cls.space diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -80,6 +80,7 @@ MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') + MPD_MAX_SIGNAL_LIST = platform.ConstantInteger('MPD_MAX_SIGNAL_LIST') # Flags MPD_POS = platform.ConstantInteger('MPD_POS') From noreply at buildbot.pypy.org Sun May 11 00:27:53 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:53 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add 4 basic operations. Message-ID: <20140510222753.B90041C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71462:b81b759072ba Date: 2014-05-10 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/b81b759072ba/ Log: Add 4 basic operations. 
diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -111,6 +111,9 @@ if to_trap: raise interp_signals.flags_as_exception(space, to_trap) + def catch_status(self, space): + return ContextStatus(space, self) + def copy_w(self, space): w_copy = W_Context(space) rffi.structcopy(w_copy.ctx, self.ctx) @@ -247,6 +250,23 @@ context = getcontext(space) return context +class ContextStatus: + def __init__(self, space, context): + self.space = space + self.context = context + + def __enter__(self): + self.status_ptr = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 1, + flavor='raw', zero=True) + return self.context.ctx, self.status_ptr + + def __exit__(self, *args): + status = rffi.cast(lltype.Signed, self.status_ptr[0]) + lltype.free(self.status_ptr, flavor='raw') + # May raise a DecimalException + self.context.addstatus(self.space, status) + + class ConvContext: def __init__(self, space, mpd, context, exact): self.space = space diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -1,4 +1,5 @@ from rpython.rlib import rmpdec, rarithmetic, rbigint, rfloat +from rpython.rlib.objectmodel import specialize from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root @@ -48,11 +49,9 @@ def apply(self, space, context, w_subtype=None): # Apply the context to the input operand. Return a new W_Decimal. 
w_result = W_Decimal.allocate(space, w_subtype) - with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: + with context.catch_status(space) as (ctx, status_ptr): rmpdec.mpd_qcopy(w_result.mpd, self.mpd, status_ptr) - context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) - context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) return w_result def descr_str(self, space): @@ -97,6 +96,46 @@ def descr_eq(self, space, w_other): return self.compare(space, w_other, 'eq') + # Operations + @staticmethod + def convert_op(space, w_value, context): + if isinstance(w_value, W_Decimal): + return None, w_value + elif space.isinstance_w(w_value, space.w_int): + value = space.bigint_w(w_value) + return None, decimal_from_bigint(space, None, value, context, + exact=True) + return space.w_NotImplemented, None + + def convert_binop(self, space, w_other, context): + w_err, w_a = W_Decimal.convert_op(space, self, context) + if w_err: + return w_err, None, None + w_err, w_b = W_Decimal.convert_op(space, w_other, context) + if w_err: + return w_err, None, None + return None, w_a, w_b + + def binary_number_method(self, space, mpd_func, w_other): + context = interp_context.getcontext(space) + + w_err, w_a, w_b = self.convert_binop(space, w_other, context) + if w_err: + return w_err + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + return w_result + + def descr_add(self, space, w_other): + return self.binary_number_method(space, rmpdec.mpd_qadd, w_other) + def descr_sub(self, space, w_other): + return self.binary_number_method(space, rmpdec.mpd_qsub, w_other) + def descr_mul(self, space, w_other): + return self.binary_number_method(space, rmpdec.mpd_qmul, w_other) + def descr_truediv(self, space, w_other): + return self.binary_number_method(space, rmpdec.mpd_qdiv, w_other) + # Boolean 
functions def is_qnan_w(self, space): return space.wrap(bool(rmpdec.mpd_isqnan(self.mpd))) @@ -296,9 +335,8 @@ w_result.mpd.c_exp = - k if not exact: - with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: - rmpdec.mpd_qfinalize(w_result.mpd, context.ctx, status_ptr) - context.addstatus(space, rffi.cast(lltype.Signed, status_ptr[0])) + with context.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qfinalize(w_result.mpd, ctx, status_ptr) return w_result def decimal_from_object(space, w_subtype, w_value, context, exact=True): @@ -339,6 +377,12 @@ __bool__ = interp2app(W_Decimal.descr_bool), __float__ = interp2app(W_Decimal.descr_float), __eq__ = interp2app(W_Decimal.descr_eq), + # + __add__ = interp2app(W_Decimal.descr_add), + __sub__ = interp2app(W_Decimal.descr_sub), + __mul__ = interp2app(W_Decimal.descr_mul), + __truediv__ = interp2app(W_Decimal.descr_truediv), + # is_qnan = interp2app(W_Decimal.is_qnan_w), is_infinite = interp2app(W_Decimal.is_infinite_w), ) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -54,6 +54,8 @@ InvalidOperation = self.decimal.InvalidOperation localcontext = self.decimal.localcontext + self.decimal.getcontext().traps[InvalidOperation] = False + #empty assert str(Decimal('')) == 'NaN' @@ -296,7 +298,7 @@ nc = self.decimal.Context() r = nc.create_decimal(0.1) - assert assertEqual(type(r)) is Decimal + assert type(r) is Decimal assert str(r) == '0.1000000000000000055511151231' assert nc.create_decimal(float('nan')).is_qnan() assert nc.create_decimal(float('inf')).is_infinite() @@ -314,3 +316,10 @@ x = self.random_float() assert x == float(nc.create_decimal(x)) # roundtrip + def test_operations(self): + Decimal = self.decimal.Decimal + + assert Decimal(4) + Decimal(3) == Decimal(7) + assert Decimal(4) - Decimal(3) == Decimal(1) + assert Decimal(4) * Decimal(3) == 
Decimal(12) + assert Decimal(6) / Decimal(3) == Decimal(2) diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -46,7 +46,7 @@ "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", - "mpd_qpow", "mpd_qmul", + "mpd_qpow", "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", "mpd_qround_to_int", ], compile_extra=compile_extra, @@ -207,10 +207,22 @@ 'mpd_qpow', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qadd = external( + 'mpd_qadd', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qsub = external( + 'mpd_qsub', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) mpd_qmul = external( 'mpd_qmul', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qdiv = external( + 'mpd_qdiv', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) mpd_qround_to_int = external( 'mpd_qround_to_int', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], From noreply at buildbot.pypy.org Sun May 11 00:27:55 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:55 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Progress: Decimal.__repr__ Message-ID: <20140510222755.15BF41C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71463:423fc178afd4 Date: 2014-05-10 20:54 +0200 http://bitbucket.org/pypy/pypy/changeset/423fc178afd4/ Log: Progress: Decimal.__repr__ diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -67,6 +67,17 @@ rmpdec.mpd_free(cp) return space.wrap(result) # Convert bytes to unicode + def descr_repr(self, space): + context = interp_context.getcontext(space) + cp = rmpdec.mpd_to_sci(self.mpd, context.capitals) + 
if not cp: + raise OperationError(space.w_MemoryError, space.w_None) + try: + result = rffi.charp2str(cp) + finally: + rmpdec.mpd_free(cp) + return space.wrap("Decimal('%s')" % result) + def descr_bool(self, space): return space.wrap(not rmpdec.mpd_iszero(self.mpd)) @@ -374,6 +385,7 @@ 'Decimal', __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), + __repr__ = interp2app(W_Decimal.descr_repr), __bool__ = interp2app(W_Decimal.descr_bool), __float__ = interp2app(W_Decimal.descr_float), __eq__ = interp2app(W_Decimal.descr_eq), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -323,3 +323,113 @@ assert Decimal(4) - Decimal(3) == Decimal(1) assert Decimal(4) * Decimal(3) == Decimal(12) assert Decimal(6) / Decimal(3) == Decimal(2) + + def test_tostring_methods(self): + Decimal = self.decimal.Decimal + d = Decimal('15.32') + assert str(d) == '15.32' + assert repr(d) == "Decimal('15.32')" + + def test_tonum_methods(self): + #Test float and int methods. 
+ Decimal = self.decimal.Decimal + + d1 = Decimal('66') + d2 = Decimal('15.32') + + #int + int(d1) == 66 + int(d2) == 15 + + #float + float(d1) == 66 + float(d2) == 15.32 + + #floor + test_pairs = [ + ('123.00', 123), + ('3.2', 3), + ('3.54', 3), + ('3.899', 3), + ('-2.3', -3), + ('-11.0', -11), + ('0.0', 0), + ('-0E3', 0), + ('89891211712379812736.1', 89891211712379812736), + ] + for d, i in test_pairs: + assert math.floor(Decimal(d)) == i + raises(ValueError, math.floor, Decimal('-NaN')) + raises(ValueError, math.floor, Decimal('sNaN')) + raises(ValueError, math.floor, Decimal('NaN123')) + raises(OverflowError, math.floor, Decimal('Inf')) + raises(OverflowError, math.floor, Decimal('-Inf')) + + #ceiling + test_pairs = [ + ('123.00', 123), + ('3.2', 4), + ('3.54', 4), + ('3.899', 4), + ('-2.3', -2), + ('-11.0', -11), + ('0.0', 0), + ('-0E3', 0), + ('89891211712379812736.1', 89891211712379812737), + ] + for d, i in test_pairs: + assert math.ceil(Decimal(d)) == i + raises(ValueError, math.ceil, Decimal('-NaN')) + raises(ValueError, math.ceil, Decimal('sNaN')) + raises(ValueError, math.ceil, Decimal('NaN123')) + raises(OverflowError, math.ceil, Decimal('Inf')) + raises(OverflowError, math.ceil, Decimal('-Inf')) + + #round, single argument + test_pairs = [ + ('123.00', 123), + ('3.2', 3), + ('3.54', 4), + ('3.899', 4), + ('-2.3', -2), + ('-11.0', -11), + ('0.0', 0), + ('-0E3', 0), + ('-3.5', -4), + ('-2.5', -2), + ('-1.5', -2), + ('-0.5', 0), + ('0.5', 0), + ('1.5', 2), + ('2.5', 2), + ('3.5', 4), + ] + for d, i in test_pairs: + assert round(Decimal(d)) == i + raises(ValueError, round, Decimal('-NaN')) + raises(ValueError, round, Decimal('sNaN')) + raises(ValueError, round, Decimal('NaN123')) + raises(OverflowError, round, Decimal('Inf')) + raises(OverflowError, round, Decimal('-Inf')) + + #round, two arguments; this is essentially equivalent + #to quantize, which is already extensively tested + test_triples = [ + ('123.456', -4, '0E+4'), + ('123.456', -3, '0E+3'), + 
('123.456', -2, '1E+2'), + ('123.456', -1, '1.2E+2'), + ('123.456', 0, '123'), + ('123.456', 1, '123.5'), + ('123.456', 2, '123.46'), + ('123.456', 3, '123.456'), + ('123.456', 4, '123.4560'), + ('123.455', 2, '123.46'), + ('123.445', 2, '123.44'), + ('Inf', 4, 'NaN'), + ('-Inf', -23, 'NaN'), + ('sNaN314', 3, 'NaN314'), + ] + for d, n, r in test_triples: + assert str(round(Decimal(d), n)) == r + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -42,7 +42,7 @@ "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", "mpd_qnew", - "mpd_to_sci_size", + "mpd_to_sci", "mpd_to_sci_size", "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", @@ -182,6 +182,8 @@ 'mpd_seterror', [MPD_PTR, rffi.UINT, rffi.UINTP], lltype.Void) # Conversion +mpd_to_sci = external( + 'mpd_to_sci', [MPD_PTR, rffi.INT], rffi.CCHARP) mpd_to_sci_size = external( 'mpd_to_sci_size', [rffi.CCHARPP, MPD_PTR, rffi.INT], rffi.SSIZE_T) From noreply at buildbot.pypy.org Sun May 11 00:27:56 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:56 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add int() conversion, support for trunc(), round()... Message-ID: <20140510222756.4DAC41C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71464:502a9fa6ddd6 Date: 2014-05-10 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/502a9fa6ddd6/ Log: Add int() conversion, support for trunc(), round()... 
diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -94,6 +94,72 @@ w_s = self.descr_str(space) return space.call_function(space.w_float, w_s) + def to_long(self, space, context, round): + if rmpdec.mpd_isspecial(self.mpd): + if rmpdec.mpd_isnan(self.mpd): + raise oefmt(space.w_ValueError, + "cannot convert NaN to integer") + else: + raise oefmt(space.w_OverflowError, + "cannot convert Infinity to integer") + + w_x = W_Decimal.allocate(space) + w_tempctx = context.copy_w(space) + rffi.setintfield(w_tempctx.ctx, 'c_round', round) + with context.catch_status(space) as (ctx, status_ptr): + # We round with the temporary context, but set status and + # raise errors on the global one. + rmpdec.mpd_qround_to_int(w_x.mpd, self.mpd, + w_tempctx.ctx, status_ptr) + + # XXX mpd_qexport_u64 would be faster... + T = rffi.CArrayPtr(rffi.USHORTP).TO + with lltype.scoped_alloc(T, 1, zero=True) as digits_ptr: + n = rmpdec.mpd_qexport_u16( + digits_ptr, 0, 0x10000, + w_x.mpd, status_ptr) + if n == rmpdec.MPD_SIZE_MAX: + raise OperationError(space.w_MemoryError, space.w_None) + try: + char_ptr = rffi.cast(rffi.CCHARP, digits_ptr[0]) + size = rffi.cast(lltype.Signed, n) * 2 + s = rffi.charpsize2str(char_ptr, size) + finally: + rmpdec.mpd_free(digits_ptr[0]) + bigint = rbigint.rbigint.frombytes( + s, byteorder=rbigint.BYTEORDER, signed=False) + if rmpdec.mpd_isnegative(w_x.mpd) and not rmpdec.mpd_iszero(w_x.mpd): + bigint = bigint.neg() + return space.newlong_from_rbigint(bigint) + + def descr_int(self, space): + context = interp_context.getcontext(space) + return self.to_long(space, context, rmpdec.MPD_ROUND_DOWN) + + def descr_floor(self, space): + context = interp_context.getcontext(space) + return self.to_long(space, context, rmpdec.MPD_ROUND_FLOOR) + + def descr_ceil(self, space): + context = interp_context.getcontext(space) + return 
self.to_long(space, context, rmpdec.MPD_ROUND_CEILING) + + def descr_round(self, space, w_x=None): + context = interp_context.getcontext(space) + if not w_x: + return self.to_long(space, context, rmpdec.MPD_ROUND_HALF_EVEN) + x = space.int_w(w_x) + w_result = W_Decimal.allocate(space) + w_q = decimal_from_ssize(space, None, 1, context, exact=False) + if x == rmpdec.MPD_SSIZE_MIN: + w_q.mpd.c_exp = rmpdec.MPD_SSIZE_MAX + else: + w_q.mpd.c_exp = -x + with context.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qquantize(w_result.mpd, self.mpd, w_q.mpd, + ctx, status_ptr) + return w_result + def compare(self, space, w_other, op): if not isinstance(w_other, W_Decimal): # So far return space.w_NotImplemented @@ -388,6 +454,10 @@ __repr__ = interp2app(W_Decimal.descr_repr), __bool__ = interp2app(W_Decimal.descr_bool), __float__ = interp2app(W_Decimal.descr_float), + __int__ = interp2app(W_Decimal.descr_int), + __floor__ = interp2app(W_Decimal.descr_floor), + __ceil__ = interp2app(W_Decimal.descr_ceil), + __round__ = interp2app(W_Decimal.descr_round), __eq__ = interp2app(W_Decimal.descr_eq), # __add__ = interp2app(W_Decimal.descr_add), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -333,17 +333,21 @@ def test_tonum_methods(self): #Test float and int methods. 
Decimal = self.decimal.Decimal + InvalidOperation = self.decimal.InvalidOperation + self.decimal.getcontext().traps[InvalidOperation] = False + + import math d1 = Decimal('66') d2 = Decimal('15.32') #int - int(d1) == 66 - int(d2) == 15 + assert int(d1) == 66 + assert int(d2) == 15 #float - float(d1) == 66 - float(d2) == 15.32 + assert float(d1) == 66 + assert float(d2) == 15.32 #floor test_pairs = [ diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -37,6 +37,7 @@ ], export_symbols=[ "mpd_qset_ssize", "mpd_qset_uint", "mpd_qset_string", "mpd_qcopy", "mpd_setspecial", + "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", "mpd_set_sign", "mpd_qfinalize", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", @@ -45,7 +46,7 @@ "mpd_to_sci", "mpd_to_sci_size", "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", - "mpd_qcmp", + "mpd_qcmp", "mpd_qquantize", "mpd_qpow", "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", "mpd_qround_to_int", ], @@ -81,6 +82,9 @@ 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') MPD_MAX_SIGNAL_LIST = platform.ConstantInteger('MPD_MAX_SIGNAL_LIST') + MPD_SIZE_MAX = platform.ConstantInteger('MPD_SIZE_MAX') + MPD_SSIZE_MAX = platform.ConstantInteger('MPD_SSIZE_MAX') + MPD_SSIZE_MIN = platform.ConstantInteger('MPD_SSIZE_MIN') # Flags MPD_POS = platform.ConstantInteger('MPD_POS') @@ -136,8 +140,17 @@ mpd_qset_string = external( 'mpd_qset_string', [MPD_PTR, rffi.CCHARP, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qimport_u32 = external( - 'mpd_qimport_u32', [MPD_PTR, rffi.UINTP, rffi.SIZE_T, - rffi.UCHAR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], rffi.SIZE_T) + 'mpd_qimport_u32', [ + MPD_PTR, rffi.UINTP, rffi.SIZE_T, + rffi.UCHAR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], 
rffi.SIZE_T) +mpd_qexport_u32 = external( + 'mpd_qexport_u32', [ + rffi.CArrayPtr(rffi.UINTP), rffi.SIZE_T, rffi.UINT, + MPD_PTR, rffi.UINTP], rffi.SIZE_T) +mpd_qexport_u16 = external( + 'mpd_qexport_u16', [ + rffi.CArrayPtr(rffi.USHORTP), rffi.SIZE_T, rffi.UINT, + MPD_PTR, rffi.UINTP], rffi.SIZE_T) mpd_qcopy = external( 'mpd_qcopy', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) mpd_setspecial = external( @@ -204,6 +217,9 @@ 'mpd_isqnan', [MPD_PTR], rffi.INT) mpd_qcmp = external( 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) +mpd_qquantize = external( + 'mpd_qquantize', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) mpd_qpow = external( 'mpd_qpow', From noreply at buildbot.pypy.org Sun May 11 00:27:57 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 11 May 2014 00:27:57 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Context.clear_flags() Message-ID: <20140510222757.7D0A31C06F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71465:739252dd6bf7 Date: 2014-05-10 23:08 +0200 http://bitbucket.org/pypy/pypy/changeset/739252dd6bf7/ Log: Add Context.clear_flags() diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -120,6 +120,12 @@ w_copy.capitals = self.capitals return w_copy + def clear_flags_w(self, space): + rffi.setintfield(self.ctx, 'c_status', 0) + + def clear_traps_w(self, space): + rffi.setintfield(self.ctx, 'c_traps', 0) + def get_prec(self, space): return space.wrap(rmpdec.mpd_getprec(self.ctx)) @@ -226,6 +232,8 @@ __repr__ = interp2app(W_Context.descr_repr), # copy=interp2app(W_Context.copy_w), + clear_flags=interp2app(W_Context.clear_flags_w), + clear_traps=interp2app(W_Context.clear_traps_w), create_decimal=interp2app(W_Context.create_decimal_w), ) From noreply at buildbot.pypy.org Sun May 11 19:59:13 2014 From: noreply at 
buildbot.pypy.org (rlamy) Date: Sun, 11 May 2014 19:59:13 +0200 (CEST) Subject: [pypy-commit] pypy default: extract make_generator_entry_graph() from build_flow() Message-ID: <20140511175913.792BF1D2D8C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71466:2ef6be32a2ca Date: 2014-05-11 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/2ef6be32a2ca/ Log: extract make_generator_entry_graph() from build_flow() diff --git a/rpython/flowspace/generator.py b/rpython/flowspace/generator.py --- a/rpython/flowspace/generator.py +++ b/rpython/flowspace/generator.py @@ -1,6 +1,8 @@ """Flow graph building for generators""" from rpython.flowspace.argument import Signature +from rpython.flowspace.bytecode import HostCode +from rpython.flowspace.pygraph import PyGraph from rpython.flowspace.model import (Block, Link, Variable, Constant, checkgraph, const) from rpython.flowspace.operation import op @@ -13,6 +15,16 @@ _immutable_ = True _attrs_ = () +def make_generator_entry_graph(func): + code = HostCode._from_code(func.func_code) + graph = PyGraph(func, code) + block = graph.startblock + for name, w_value in zip(code.co_varnames, block.framestate.mergeable): + if isinstance(w_value, Variable): + w_value.rename(name) + return bootstrap_generator(graph) + + def bootstrap_generator(graph): # This is the first copy of the graph. We replace it with # a small bootstrap graph. diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -1,13 +1,13 @@ """Implements the main interface for flow graph creation: build_flow(). 
""" -from inspect import CO_NEWLOCALS +from inspect import CO_NEWLOCALS, isgeneratorfunction -from rpython.flowspace.model import Variable, checkgraph +from rpython.flowspace.model import checkgraph from rpython.flowspace.bytecode import HostCode from rpython.flowspace.flowcontext import (FlowContext, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, - bootstrap_generator) + make_generator_entry_graph) from rpython.flowspace.pygraph import PyGraph @@ -33,15 +33,10 @@ Create the flow graph for the function. """ _assert_rpythonic(func) + if (isgeneratorfunction(func) and + not hasattr(func, '_generator_next_method_of_')): + return make_generator_entry_graph(func) code = HostCode._from_code(func.func_code) - if (code.is_generator and - not hasattr(func, '_generator_next_method_of_')): - graph = PyGraph(func, code) - block = graph.startblock - for name, w_value in zip(code.co_varnames, block.framestate.mergeable): - if isinstance(w_value, Variable): - w_value.rename(name) - return bootstrap_generator(graph) graph = PyGraph(func, code) ctx = FlowContext(graph, code) ctx.build_flow() From noreply at buildbot.pypy.org Sun May 11 23:02:28 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 11 May 2014 23:02:28 +0200 (CEST) Subject: [pypy-commit] pypy default: inline bootstrap_generator() Message-ID: <20140511210228.B46601D236E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71467:67067c56ae1a Date: 2014-05-11 22:01 +0100 http://bitbucket.org/pypy/pypy/changeset/67067c56ae1a/ Log: inline bootstrap_generator() diff --git a/rpython/flowspace/generator.py b/rpython/flowspace/generator.py --- a/rpython/flowspace/generator.py +++ b/rpython/flowspace/generator.py @@ -16,19 +16,16 @@ _attrs_ = () def make_generator_entry_graph(func): + # This is the first copy of the graph. We replace it with + # a small bootstrap graph. 
code = HostCode._from_code(func.func_code) graph = PyGraph(func, code) block = graph.startblock for name, w_value in zip(code.co_varnames, block.framestate.mergeable): if isinstance(w_value, Variable): w_value.rename(name) - return bootstrap_generator(graph) - - -def bootstrap_generator(graph): - # This is the first copy of the graph. We replace it with - # a small bootstrap graph. - GeneratorIterator = make_generatoriterator_class(graph) + varnames = get_variable_names(graph.startblock.inputargs) + GeneratorIterator = make_generatoriterator_class(varnames) replace_graph_with_bootstrap(GeneratorIterator, graph) # We attach a 'next' method to the GeneratorIterator class # that will invoke the real function, based on a second @@ -42,11 +39,11 @@ tweak_generator_body_graph(GeneratorIterator.Entry, graph) -def make_generatoriterator_class(graph): +def make_generatoriterator_class(var_names): class GeneratorIterator(object): class Entry(AbstractPosition): _immutable_ = True - varnames = get_variable_names(graph.startblock.inputargs) + varnames = var_names def __init__(self, entry): self.current = entry @@ -84,7 +81,7 @@ self.current = next_entry return return_value GeneratorIterator.next = next - return func # for debugging + graph._tweaked_func = func # for testing def get_variable_names(variables): seen = set() diff --git a/rpython/flowspace/test/test_generator.py b/rpython/flowspace/test/test_generator.py --- a/rpython/flowspace/test/test_generator.py +++ b/rpython/flowspace/test/test_generator.py @@ -1,8 +1,8 @@ from rpython.conftest import option from rpython.flowspace.objspace import build_flow from rpython.flowspace.model import Variable -from rpython.flowspace.generator import (make_generatoriterator_class, - replace_graph_with_bootstrap, get_variable_names, attach_next_method) +from rpython.flowspace.generator import ( + make_generator_entry_graph, get_variable_names) from rpython.translator.simplify import join_blocks @@ -93,14 +93,11 @@ yield n + 1 z -= 10 # 
- graph = build_flow(f) - GeneratorIterator = make_generatoriterator_class(graph) - replace_graph_with_bootstrap(GeneratorIterator, graph) - func1 = attach_next_method(GeneratorIterator, graph) + graph = make_generator_entry_graph(f) + func1 = graph._tweaked_func if option.view: graph.show() - # - assert func1._generator_next_method_of_ is GeneratorIterator + GeneratorIterator = graph._tweaked_func._generator_next_method_of_ assert hasattr(GeneratorIterator, 'next') # graph_next = build_flow(GeneratorIterator.next.im_func) From noreply at buildbot.pypy.org Mon May 12 09:18:23 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 May 2014 09:18:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: make stm_hint_commit_soon visible to the JIT in order to not remove transaction Message-ID: <20140512071823.D2A371C0606@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71468:a43485667edf Date: 2014-05-12 09:16 +0200 http://bitbucket.org/pypy/pypy/changeset/a43485667edf/ Log: make stm_hint_commit_soon visible to the JIT in order to not remove transaction breaks right after it diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -237,7 +237,7 @@ return self.malloc_array(arraydescr.basesize, num_elem, arraydescr.itemsize, arraydescr.lendescr.offset) - + def get_malloc_slowpath_addr(self): return None @@ -257,7 +257,7 @@ class GcRootMap_shadowstack(object): is_shadow_stack = True is_stm = False - + def __init__(self, gcdescr): pass @@ -271,7 +271,7 @@ class GcRootMap_stm(object): is_shadow_stack = True is_stm = True - + def __init__(self, gcdescr): pass @@ -490,7 +490,7 @@ unicode_itemsize = self.unicode_descr.itemsize unicode_ofs_length = self.unicode_descr.lendescr.offset - + def malloc_str(length): type_id = llop.extract_ushort(llgroup.HALFWORD, str_type_id) return llop1.do_malloc_varsize_clear( @@ -499,7 +499,7 @@ 
str_ofs_length) self.generate_function('malloc_str', malloc_str, [lltype.Signed]) - + def malloc_unicode(length): type_id = llop.extract_ushort(llgroup.HALFWORD, unicode_type_id) return llop1.do_malloc_varsize_clear( @@ -529,6 +529,9 @@ self.generate_function('stm_try_inevitable', rstm.become_inevitable, [], RESULT=lltype.Void) + self.generate_function('stm_hint_commit_soon', + rstm.hint_commit_soon, [], + RESULT=lltype.Void) def _bh_malloc(self, sizedescr): from rpython.memory.gctypelayout import check_typeid @@ -603,7 +606,7 @@ def can_use_nursery_malloc(self, size): return size < self.max_size_of_young_obj - + def has_write_barrier_class(self): return WriteBarrierDescr @@ -612,7 +615,7 @@ def get_malloc_slowpath_array_addr(self): return self.get_malloc_fn_addr('malloc_array') - + # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -31,6 +31,10 @@ self.next_op_may_be_in_new_transaction() self.newops.append(op) return + if opnum == rop.STM_HINT_COMMIT_SOON: + self._do_stm_call('stm_hint_commit_soon', [], None, + op.stm_location) + return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): self.newops.append(op) diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -912,7 +912,12 @@ @arguments() def bhimpl_stm_transaction_break(): pass - + + @arguments() + def bhimpl_stm_hint_commit_soon(): + pass + + # ---------- # the main hints and recursive calls diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ 
-127,7 +127,8 @@ opnum == rop.COPYSTRCONTENT or opnum == rop.COPYUNICODECONTENT): return - if opnum in (rop.GUARD_NOT_FORCED, rop.GUARD_NOT_FORCED_2): + if opnum in (rop.GUARD_NOT_FORCED, rop.GUARD_NOT_FORCED_2, + rop.STM_HINT_COMMIT_SOON): self.stm_break_wanted = True return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -31,7 +31,7 @@ def flush(self): # just in case. it shouldn't be necessary self.flush_cached() - + def default_emit(self, op): self.flush_cached() self.emit_operation(op) @@ -39,7 +39,7 @@ def _break_wanted(self): is_loop = self.optimizer.loop.is_really_loop return self.optimizer.stm_info.get('break_wanted', is_loop) - + def _set_break_wanted(self, val): self.optimizer.stm_info['break_wanted'] = val @@ -84,6 +84,11 @@ self.keep_but_ignore_gnf = False self.emit_operation(op) + def optimize_STM_HINT_COMMIT_SOON(self, op): + self.flush_cached() + self._set_break_wanted(True) + self.emit_operation(op) + dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', default=OptSTM.default_emit) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -70,7 +70,7 @@ i0 = stm_should_break_transaction() guard_false(i0) [] - + jump() """ expected = """ @@ -93,7 +93,7 @@ preamble = """ [p1] i1 = getfield_gc(p1, descr=adescr) - + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p1) @@ -102,7 +102,7 @@ [p1] i0 = stm_should_break_transaction() guard_false(i0) [] - + jump(p1) """ self.optimize_loop(ops, expected, expected_preamble=preamble) @@ -142,10 +142,10 @@ [] stm_transaction_break(0) guard_not_forced() [] - + escape() # e.g. 
like a call_release_gil guard_not_forced() [] - + stm_transaction_break(0) guard_not_forced() [] stm_transaction_break(0) @@ -164,7 +164,7 @@ stm_transaction_break(0) guard_not_forced() [] - + i0 = stm_should_break_transaction() guard_false(i0) [] jump() @@ -209,7 +209,7 @@ guard_not_forced() [] p6 = force_token() # not removed! - + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) @@ -224,7 +224,7 @@ escape() p6 = force_token() # not removed! - + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) @@ -234,7 +234,7 @@ escape() p6 = force_token() # not removed! - + i0 = stm_should_break_transaction() guard_false(i0) [] jump(p0) @@ -246,7 +246,7 @@ [p0, p1] setfield_gc(p0, p1, descr=adescr) stm_transaction_break(0) - + p2 = force_token() p3 = force_token() jump(p0, p1) @@ -264,9 +264,9 @@ [p0, p1] p2 = force_token() p3 = force_token() - + setfield_gc(p0, p1, descr=adescr) # moved here by other stuff... - jump(p0, p1) + jump(p0, p1) """ self.optimize_loop(ops, expected, expected_preamble=preamble) @@ -286,3 +286,46 @@ jump(i1, p1) """ self.optimize_loop(ops, expected) + + def test_add_tb_after_commit_soon(self): + ops = """ + [] + stm_transaction_break(0) + guard_not_forced() [] + + stm_hint_commit_soon() + + stm_transaction_break(0) + guard_not_forced() [] + stm_transaction_break(0) + guard_not_forced() [] + i0 = stm_should_break_transaction() + guard_false(i0) [] + jump() + """ + preamble = """ + [] + stm_transaction_break(0) + guard_not_forced() [] + + stm_hint_commit_soon() + + stm_transaction_break(0) + guard_not_forced() [] + + i0 = stm_should_break_transaction() + guard_false(i0) [] + jump() + """ + expected = """ + [] + stm_hint_commit_soon() + + stm_transaction_break(0) + guard_not_forced() [] + + i0 = stm_should_break_transaction() + guard_false(i0) [] + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -224,6 +224,13 @@ # because of that self._record_stm_transaction_break(True) + @arguments() + def opimpl_stm_hint_commit_soon(self): + mi = self.metainterp + mi.history.record(rop.STM_HINT_COMMIT_SOON, [], None) + self.metainterp.heapcache.invalidate_caches(rop.STM_HINT_COMMIT_SOON, + None, []) + for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -495,6 +495,7 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'STM_SHOULD_BREAK_TRANSACTION/0', + 'STM_HINT_COMMIT_SOON/0', 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -55,13 +55,17 @@ rstm.jit_stm_should_break_transaction(True) # keep (True) rstm.jit_stm_should_break_transaction(True) # keep (True) rstm.jit_stm_should_break_transaction(False) + rstm.hint_commit_soon() + rstm.jit_stm_should_break_transaction(False) # keep + rstm.jit_stm_should_break_transaction(False) return 42 res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == 42 self.check_operations_history({ - 'stm_transaction_break':1, + 'stm_transaction_break':2, + 'stm_hint_commit_soon':1, 'stm_should_break_transaction':3, - 'guard_not_forced':2, + 'guard_not_forced':3, 'guard_no_exception':1, 'call_may_force':1}) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -49,7 +49,6 @@ return 
llop.jit_stm_should_break_transaction(lltype.Bool, if_there_is_no_other) - at dont_look_inside def hint_commit_soon(): """As the name says, just a hint. Maybe calling it several times in a row is more persuasive""" diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,7 +438,7 @@ 'stm_should_break_transaction': LLOp(sideeffects=False), 'stm_set_transaction_length': LLOp(), - 'stm_hint_commit_soon': LLOp(), + 'stm_hint_commit_soon': LLOp(canrun=True), 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(canmallocgc=True), # may allocate new array, diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -715,6 +715,9 @@ def op_jit_stm_transaction_break_point(): pass +def op_stm_hint_commit_soon(): + pass + # ____________________________________________________________ From noreply at buildbot.pypy.org Mon May 12 09:18:25 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 12 May 2014 09:18:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: less inevitable transactions when using print_abort_info Message-ID: <20140512071825.01A121C0606@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71469:65abf162d582 Date: 2014-05-12 09:17 +0200 http://bitbucket.org/pypy/pypy/changeset/65abf162d582/ Log: less inevitable transactions when using print_abort_info diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -50,16 +50,18 @@ info = _thread.longest_abort_info(mintime) if info is None: return + + output = [] with atomic: - print >> sys.stderr, "Conflict", + output.append("Conflict ") a, b, c, d = info try: reason = _timing_reasons[a] except IndexError: reason = "'%s'" % (a,) - print >> 
sys.stderr, reason, + output.append(reason) def show(line): - print >> sys.stderr, " ", line + output.append(" %s\n" % line) match = _r_line.match(line) if match and match.group(1) != '?': filename = match.group(1) @@ -82,14 +84,14 @@ filename = _fullfilenames[filename] line = linecache.getline(filename, lineno) if line: - print >> sys.stderr, " ", line.strip() + output.append(" %s\n" % line.strip()) if d: - print >> sys.stderr, "between two threads:" + output.append(" between two threads:\n") show(c) show(d) else: - print >> sys.stderr, "in this thread:" + output.append(" in this thread:\n") show(c) - print >> sys.stderr, 'Lost %.6f seconds.' % (b,) - print >> sys.stderr + output.append('Lost %.6f seconds.\n\n' % (b,)) _thread.reset_longest_abort_info() + print >> sys.stderr, "".join(output), From noreply at buildbot.pypy.org Mon May 12 19:55:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 12 May 2014 19:55:14 +0200 (CEST) Subject: [pypy-commit] pypy default: partially revert adbfbb5d604d, this was correct. test should be failing: nightly doesnt print irc topic Message-ID: <20140512175514.C940A1C103D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71470:68611bfdf4bd Date: 2014-05-12 13:54 -0400 http://bitbucket.org/pypy/pypy/changeset/68611bfdf4bd/ Log: partially revert adbfbb5d604d, this was correct. 
test should be failing: nightly doesnt print irc topic diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,9 +7,10 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = sys.version_info[3] == "final" +is_release = PYPY_VERSION[3] == "final" banner = sys.version.splitlines()[0] From noreply at buildbot.pypy.org Mon May 12 20:02:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 May 2014 20:02:18 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1764 Message-ID: <20140512180218.ECA4D1C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71471:cae86999e6c8 Date: 2014-05-12 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/cae86999e6c8/ Log: issue1764 Test and fix diff --git a/rpython/translator/c/gcc/instruction.py b/rpython/translator/c/gcc/instruction.py --- a/rpython/translator/c/gcc/instruction.py +++ b/rpython/translator/c/gcc/instruction.py @@ -184,6 +184,9 @@ def __init__(self): self.delta = -7.25 # use this non-integer value as a marker +class InsnPushed(InsnStackAdjust): + pass + class InsnStop(Insn): _args_ = ['reason'] def __init__(self, reason='?'): diff --git a/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s b/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s new file mode 100644 --- /dev/null +++ b/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s @@ -0,0 +1,158 @@ + .type seterror.part.1, @function +seterror.part.1: +.LFB77: + .cfi_startproc + pushq %r14 + .cfi_def_cfa_offset 16 + .cfi_offset 14, -16 + pushq %r13 + .cfi_def_cfa_offset 24 + .cfi_offset 13, -24 + pushq %r12 + .cfi_def_cfa_offset 32 + .cfi_offset 12, -32 + pushq %rbp + .cfi_def_cfa_offset 40 + .cfi_offset 6, -40 + pushq 
%rbx + .cfi_def_cfa_offset 48 + .cfi_offset 3, -48 + subq $512, %rsp + .cfi_def_cfa_offset 560 + testq %r8, %r8 + je .L30 +.L11: + movq PyPyExc_TypeError at GOTPCREL(%rip), %rax + movq %r8, %rsi + movq (%rax), %rdi + call PyPyErr_SetString at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq $512, %rsp + .cfi_remember_state + .cfi_def_cfa_offset 48 + popq %rbx + .cfi_def_cfa_offset 40 + popq %rbp + .cfi_def_cfa_offset 32 + popq %r12 + .cfi_def_cfa_offset 24 + popq %r13 + .cfi_def_cfa_offset 16 + popq %r14 + .cfi_def_cfa_offset 8 + ret + .p2align 4,,10 + .p2align 3 +.L30: + .cfi_restore_state + testq %rcx, %rcx + movq %rsi, %r12 + movl %edi, %r14d + movq %rdx, %r13 + movq %rsp, %rbp + movl $512, %esi + movq %rsp, %rbx + je .L13 + leaq .LC6(%rip), %rdx + movl $512, %esi + movq %rsp, %rdi + xorl %eax, %eax + movq %rsp, %rbx + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } +.L14: + movl (%rbx), %eax + addq $4, %rbx + leal -16843009(%rax), %esi + notl %eax + andl %eax, %esi + andl $-2139062144, %esi + je .L14 + movl %esi, %eax + shrl $16, %eax + testl $32896, %esi + cmove %eax, %esi + leaq 2(%rbx), %rax + cmove %rax, %rbx + addb %sil, %sil + movq %rbp, %rsi + sbbq $3, %rbx + subq %rbx, %rsi + addq $512, %rsi +.L13: + testl %r14d, %r14d + je .L16 + leaq .LC7(%rip), %rdx + movq %rbx, %rdi + movl %r14d, %ecx + xorl %eax, %eax + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx + movl 0(%r13), %eax + testl %eax, %eax + jle .L18 + movq %rbx, %rdx + subq %rbp, %rdx + cmpl $219, %edx + jg .L18 + addq $4, %r13 + xorl %r14d, %r14d + .p2align 4,,10 + .p2align 3 +.L21: + movq %rbp, %rsi + leal -1(%rax), %ecx + leaq .LC8(%rip), %rdx + subq %rbx, 
%rsi + movq %rbx, %rdi + xorl %eax, %eax + addq $512, %rsi + addl $1, %r14d + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx + movl 0(%r13), %eax + testl %eax, %eax + jle .L18 + cmpl $32, %r14d + je .L18 + movq %rbx, %rdx + addq $4, %r13 + subq %rbp, %rdx + cmpl $219, %edx + jle .L21 + jmp .L18 + .p2align 4,,10 + .p2align 3 +.L16: + leaq .LC9(%rip), %rdx + movq %rbx, %rdi + xorl %eax, %eax + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx +.L18: + movq %rbp, %rsi + leaq .LC10(%rip), %rdx + movq %r12, %rcx + subq %rbx, %rsi + movq %rbx, %rdi + xorl %eax, %eax + addq $512, %rsi + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbp, %r8 + jmp .L11 + .cfi_endproc +.LFE77: + .size seterror.part.1, .-seterror.part.1 diff --git a/rpython/translator/c/gcc/test/test_trackgcroot.py b/rpython/translator/c/gcc/test/test_trackgcroot.py --- a/rpython/translator/c/gcc/test/test_trackgcroot.py +++ b/rpython/translator/c/gcc/test/test_trackgcroot.py @@ -130,7 +130,7 @@ elif format == 'darwin' or format == 'darwin64': py.test.skip("disabled on OS/X's terribly old gcc") else: - r_globallabel = re.compile(r"([\w]+)=[.]+") + r_globallabel = re.compile(r"([\w.]+)=[.]+") print print path.dirpath().basename + '/' + path.basename lines = path.readlines() diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -8,7 +8,7 @@ from 
rpython.translator.c.gcc.instruction import InsnSetLocal, InsnCopyLocal from rpython.translator.c.gcc.instruction import InsnPrologue, InsnEpilogue from rpython.translator.c.gcc.instruction import InsnGCROOT, InsnCondJump -from rpython.translator.c.gcc.instruction import InsnStackAdjust +from rpython.translator.c.gcc.instruction import InsnStackAdjust, InsnPushed from rpython.translator.c.gcc.instruction import InsnCannotFollowEsp from rpython.translator.c.gcc.instruction import LocalVar, somenewvalue from rpython.translator.c.gcc.instruction import frameloc_esp, frameloc_ebp @@ -665,14 +665,22 @@ match = self.r_unaryinsn.match(line) source = match.group(1) return self.insns_for_copy(source, self.TOP_OF_STACK_MINUS_WORD) + \ - [InsnStackAdjust(-self.WORD)] + [InsnPushed(-self.WORD)] def _visit_pop(self, target): return [InsnStackAdjust(+self.WORD)] + \ self.insns_for_copy(self.TOP_OF_STACK_MINUS_WORD, target) def _visit_prologue(self): - # for the prologue of functions that use %ebp as frame pointer + # For the prologue of functions that use %ebp as frame pointer. 
+ # First, find the latest InsnStackAdjust; if it's not a PUSH, + # then consider that this 'mov %rsp, %rbp' is actually unrelated + i = -1 + while not isinstance(self.insns[i], InsnStackAdjust): + i -= 1 + if not isinstance(self.insns[i], InsnPushed): + return [] + # self.uses_frame_pointer = True self.r_localvar = self.r_localvarfp return [InsnPrologue(self.WORD)] From noreply at buildbot.pypy.org Mon May 12 20:02:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 May 2014 20:02:20 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140512180220.59CC51C10C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71472:3d3d331a4e18 Date: 2014-05-12 20:01 +0200 http://bitbucket.org/pypy/pypy/changeset/3d3d331a4e18/ Log: merge heads diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,9 +7,10 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir +from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = sys.version_info[3] == "final" +is_release = PYPY_VERSION[3] == "final" banner = sys.version.splitlines()[0] From noreply at buildbot.pypy.org Mon May 12 21:54:43 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 May 2014 21:54:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Add enough of gdbm.py to pass all the tests Message-ID: <20140512195443.390621C14DC@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71473:590bbc7f0af9 Date: 2014-05-12 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/590bbc7f0af9/ Log: Add enough of gdbm.py to pass all the tests diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,156 @@ + +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' 
+#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... +#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +lib = ffi.verify(''' +#include "gdbm.h" +''', libraries=['gdbm']) + +class error(Exception): + pass + +def _fromstr(key): + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + self.size = -1 + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + if not res: + self._raise_from_errno() + self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(os.strerror(ffi.errno)) + raise error(lib.gdbm_strerror(lib.gdbm_errno)) + + def __setitem__(self, key, value): + self._check_closed() + r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = ffi.string(drec.dptr, drec.size) + lib.free(drec.dptr) + return res + + def keys(self): + 
self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(ffi.string(key.dptr, key.dsize)) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = ffi.string(key.dptr, key.dsize) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = ffi.string(key.dptr, key.dsize) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error("GDBM object has already been closed") + + __del__ = close + +def open(filename, flags='r', mode=0666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error("Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" From noreply at buildbot.pypy.org Mon May 12 21:57:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 May 2014 21:57:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Add an optional "pure" flag to bh_getfield_gc & friends, as well as to raw_load. 
Message-ID: <20140512195726.6F6D21C14DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71474:e7566140001b Date: 2014-05-12 19:58 +0200 http://bitbucket.org/pypy/pypy/changeset/e7566140001b/ Log: Add an optional "pure" flag to bh_getfield_gc & friends, as well as to raw_load. diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -456,32 +456,37 @@ bh_call_f = _do_call bh_call_v = _do_call - def bh_getfield_gc(self, p, descr): + def _bh_getfield(self, p, descr, pure=False): p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure = bh_getfield_gc - bh_getfield_gc_i = bh_getfield_gc - bh_getfield_gc_r = bh_getfield_gc - bh_getfield_gc_f = bh_getfield_gc + direct_getfield_gc = _bh_getfield + direct_getfield_gc_pure = _bh_getfield + direct_getfield_raw = _bh_getfield + direct_getfield_raw_pure = _bh_getfield - bh_getfield_raw = bh_getfield_gc - bh_getfield_raw_pure = bh_getfield_raw - bh_getfield_raw_i = bh_getfield_raw - bh_getfield_raw_r = bh_getfield_raw - bh_getfield_raw_f = bh_getfield_raw + bh_getfield_gc_i = _bh_getfield + bh_getfield_gc_r = _bh_getfield + bh_getfield_gc_f = _bh_getfield - def bh_setfield_gc(self, p, newvalue, descr): + bh_getfield_raw_i = _bh_getfield + bh_getfield_raw_r = _bh_getfield + bh_getfield_raw_f = _bh_getfield + + def _bh_setfield(self, p, newvalue, descr): p = support.cast_arg(lltype.Ptr(descr.S), p) setattr(p, descr.fieldname, support.cast_arg(descr.FIELD, newvalue)) - bh_setfield_gc_i = bh_setfield_gc - bh_setfield_gc_r = bh_setfield_gc - bh_setfield_gc_f = bh_setfield_gc + direct_setfield_gc = _bh_setfield + direct_setfield_raw = _bh_setfield - bh_setfield_raw = bh_setfield_gc - bh_setfield_raw_i = bh_setfield_raw - bh_setfield_raw_f = bh_setfield_raw + bh_setfield_gc_i = _bh_setfield + 
bh_setfield_gc_r = _bh_setfield + bh_setfield_gc_f = _bh_setfield + + bh_setfield_raw = _bh_setfield + bh_setfield_raw_i = _bh_setfield + bh_setfield_raw_f = _bh_setfield def bh_arraylen_gc(self, a, descr): array = a._obj.container @@ -489,35 +494,39 @@ array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() - def bh_getarrayitem_gc(self, a, index, descr): + def _bh_getarrayitem(self, a, index, descr, pure=False): a = support.cast_arg(lltype.Ptr(descr.A), a) array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) - bh_getarrayitem_gc_pure = bh_getarrayitem_gc - bh_getarrayitem_gc_i = bh_getarrayitem_gc - bh_getarrayitem_gc_r = bh_getarrayitem_gc - bh_getarrayitem_gc_f = bh_getarrayitem_gc + direct_getarrayitem_gc = _bh_getarrayitem + direct_getarrayitem_gc_pure = _bh_getarrayitem + direct_getarrayitem_raw = _bh_getarrayitem + direct_getarrayitem_raw_pure = _bh_getarrayitem - bh_getarrayitem_raw = bh_getarrayitem_gc - bh_getarrayitem_raw_pure = bh_getarrayitem_raw - bh_getarrayitem_raw_i = bh_getarrayitem_raw - bh_getarrayitem_raw_r = bh_getarrayitem_raw - bh_getarrayitem_raw_f = bh_getarrayitem_raw + bh_getarrayitem_gc_i = _bh_getarrayitem + bh_getarrayitem_gc_r = _bh_getarrayitem + bh_getarrayitem_gc_f = _bh_getarrayitem - def bh_setarrayitem_gc(self, a, index, item, descr): + bh_getarrayitem_raw_i = _bh_getarrayitem + bh_getarrayitem_raw_r = _bh_getarrayitem + bh_getarrayitem_raw_f = _bh_getarrayitem + + def _bh_setarrayitem(self, a, index, item, descr): a = support.cast_arg(lltype.Ptr(descr.A), a) array = a._obj array.setitem(index, support.cast_arg(descr.A.OF, item)) - bh_setarrayitem_gc_i = bh_setarrayitem_gc - bh_setarrayitem_gc_r = bh_setarrayitem_gc - bh_setarrayitem_gc_f = bh_setarrayitem_gc + direct_setarrayitem_gc = _bh_setarrayitem + direct_setarrayitem_raw = _bh_setarrayitem - bh_setarrayitem_raw = bh_setarrayitem_gc - bh_setarrayitem_raw_i = bh_setarrayitem_raw - bh_setarrayitem_raw_r = bh_setarrayitem_raw - 
bh_setarrayitem_raw_f = bh_setarrayitem_raw + bh_setarrayitem_gc_i = _bh_setarrayitem + bh_setarrayitem_gc_r = _bh_setarrayitem + bh_setarrayitem_gc_f = _bh_setarrayitem + + bh_setarrayitem_raw_i = _bh_setarrayitem + bh_setarrayitem_raw_r = _bh_setarrayitem + bh_setarrayitem_raw_f = _bh_setarrayitem def bh_getinteriorfield_gc(self, a, index, descr): array = a._obj.container @@ -1072,12 +1081,17 @@ return _op_default_implementation def _new_execute(opname): + if hasattr(LLGraphCPU, 'direct_' + opname): + methname = 'direct_' + opname + else: + methname = 'bh_' + opname + # def execute(self, descr, *args): if descr is not None: new_args = args + (descr,) else: new_args = args - return getattr(self.cpu, 'bh_' + opname)(*new_args) + return getattr(self.cpu, methname)(*new_args) execute.func_name = 'execute_' + opname return execute diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -385,14 +385,14 @@ # ____________________ RAW PRIMITIVES ________________________ @specialize.argtype(1) - def read_int_at_mem(self, gcref, ofs, size, sign): + def read_int_at_mem(self, gcref, ofs, size, sign, pure=False): for STYPE, UTYPE, itemsize in unroll_basic_sizes: if size == itemsize: if sign: - val = llop.raw_load(STYPE, gcref, ofs) + val = llop.raw_load(STYPE, gcref, ofs, pure) val = rffi.cast(lltype.Signed, val) else: - val = llop.raw_load(UTYPE, gcref, ofs) + val = llop.raw_load(UTYPE, gcref, ofs, pure) val = rffi.cast(lltype.Signed, val) return val else: @@ -409,8 +409,8 @@ raise NotImplementedError("size = %d" % size) @specialize.argtype(1) - def read_ref_at_mem(self, gcref, ofs): - return llop.raw_load(llmemory.GCREF, gcref, ofs) + def read_ref_at_mem(self, gcref, ofs, pure=False): + return llop.raw_load(llmemory.GCREF, gcref, ofs, pure) # non- at specialized: must only be called with llmemory.GCREF def write_ref_at_mem(self, gcref, 
ofs, newvalue): @@ -418,8 +418,8 @@ # the write barrier is implied above @specialize.argtype(1) - def read_float_at_mem(self, gcref, ofs): - return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs) + def read_float_at_mem(self, gcref, ofs, pure=False): + return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs, pure) @specialize.argtype(1) def write_float_at_mem(self, gcref, ofs, newvalue): @@ -461,23 +461,23 @@ def bh_arraylen_gc(self, array, arraydescr): assert isinstance(arraydescr, ArrayDescr) ofs = arraydescr.lendescr.offset - return self.read_int_at_mem(array, ofs, WORD, 1) + return self.read_int_at_mem(array, ofs, WORD, 1, True) @specialize.argtype(1) - def bh_getarrayitem_gc_i(self, gcref, itemindex, arraydescr): + def bh_getarrayitem_gc_i(self, gcref, itemindex, arraydescr, pure=False): ofs, size, sign = self.unpack_arraydescr_size(arraydescr) return self.read_int_at_mem(gcref, ofs + itemindex * size, size, - sign) + sign, pure) - def bh_getarrayitem_gc_r(self, gcref, itemindex, arraydescr): + def bh_getarrayitem_gc_r(self, gcref, itemindex, arraydescr, pure=False): ofs = self.unpack_arraydescr(arraydescr) - return self.read_ref_at_mem(gcref, itemindex * WORD + ofs) + return self.read_ref_at_mem(gcref, itemindex * WORD + ofs, pure) @specialize.argtype(1) - def bh_getarrayitem_gc_f(self, gcref, itemindex, arraydescr): + def bh_getarrayitem_gc_f(self, gcref, itemindex, arraydescr, pure=False): ofs = self.unpack_arraydescr(arraydescr) fsize = rffi.sizeof(longlong.FLOATSTORAGE) - return self.read_float_at_mem(gcref, itemindex * fsize + ofs) + return self.read_float_at_mem(gcref, itemindex * fsize + ofs, pure) @specialize.argtype(1) def bh_setarrayitem_gc_i(self, gcref, itemindex, newvalue, arraydescr): @@ -557,19 +557,19 @@ return ord(u.chars[index]) @specialize.argtype(1) - def bh_getfield_gc_i(self, struct, fielddescr): + def bh_getfield_gc_i(self, struct, fielddescr, pure=False): ofs, size, sign = self.unpack_fielddescr_size(fielddescr) - return 
self.read_int_at_mem(struct, ofs, size, sign) + return self.read_int_at_mem(struct, ofs, size, sign, pure) @specialize.argtype(1) - def bh_getfield_gc_r(self, struct, fielddescr): + def bh_getfield_gc_r(self, struct, fielddescr, pure=False): ofs = self.unpack_fielddescr(fielddescr) - return self.read_ref_at_mem(struct, ofs) + return self.read_ref_at_mem(struct, ofs, pure) @specialize.argtype(1) - def bh_getfield_gc_f(self, struct, fielddescr): + def bh_getfield_gc_f(self, struct, fielddescr, pure=False): ofs = self.unpack_fielddescr(fielddescr) - return self.read_float_at_mem(struct, ofs) + return self.read_float_at_mem(struct, ofs, pure) bh_getfield_raw_i = bh_getfield_gc_i bh_getfield_raw_r = bh_getfield_gc_r diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -199,25 +199,25 @@ # lltype specific operations # -------------------------- - def bh_getarrayitem_gc_i(self, array, index, arraydescr): + def bh_getarrayitem_gc_i(self, array, index, arraydescr, pure=False): raise NotImplementedError - def bh_getarrayitem_gc_r(self, array, index, arraydescr): + def bh_getarrayitem_gc_r(self, array, index, arraydescr, pure=False): raise NotImplementedError - def bh_getarrayitem_gc_f(self, array, index, arraydescr): + def bh_getarrayitem_gc_f(self, array, index, arraydescr, pure=False): raise NotImplementedError - def bh_getfield_gc_i(self, struct, fielddescr): + def bh_getfield_gc_i(self, struct, fielddescr, pure=False): raise NotImplementedError - def bh_getfield_gc_r(self, struct, fielddescr): + def bh_getfield_gc_r(self, struct, fielddescr, pure=False): raise NotImplementedError - def bh_getfield_gc_f(self, struct, fielddescr): + def bh_getfield_gc_f(self, struct, fielddescr, pure=False): raise NotImplementedError - def bh_getfield_raw_i(self, struct, fielddescr): + def bh_getfield_raw_i(self, struct, fielddescr, pure=False): raise NotImplementedError - def 
bh_getfield_raw_r(self, struct, fielddescr): + def bh_getfield_raw_r(self, struct, fielddescr, pure=False): raise NotImplementedError - def bh_getfield_raw_f(self, struct, fielddescr): + def bh_getfield_raw_f(self, struct, fielddescr, pure=False): raise NotImplementedError def bh_new(self, sizedescr): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2000,7 +2000,7 @@ r = self.cpu.bh_getfield_gc_i(r1.value, descrshort) assert r == 1313 self.cpu.bh_setfield_gc_i(r1.value, 1333, descrshort) - r = self.cpu.bh_getfield_gc_i(r1.value, descrshort) + r = self.cpu.bh_getfield_gc_i(r1.value, descrshort, pure=True) assert r == 1333 r = self.execute_operation(rop.GETFIELD_GC, [r1], 'int', descr=descrshort) @@ -2938,7 +2938,7 @@ b = lltype.malloc(B, 4) b[3] = a x = cpu.bh_getarrayitem_gc_r( - lltype.cast_opaque_ptr(llmemory.GCREF, b), 3, descr_B) + lltype.cast_opaque_ptr(llmemory.GCREF, b), 3, descr_B, pure=True) assert lltype.cast_opaque_ptr(lltype.Ptr(A), x) == a if self.cpu.supports_floats: C = lltype.GcArray(lltype.Float) @@ -2993,6 +2993,11 @@ descrfld_rx) assert x == ord('?') # + x = cpu.bh_getfield_raw_i( + heaptracker.adr2int(llmemory.cast_ptr_to_adr(rs)), + descrfld_rx, pure=True) + assert x == ord('?') + # cpu.bh_setfield_raw_i( heaptracker.adr2int(llmemory.cast_ptr_to_adr(rs)), ord('!'), descrfld_rx) @@ -3473,7 +3478,10 @@ expected = rffi.cast(lltype.Signed, rffi.cast(RESTYPE, value)) a[3] = rffi.cast(RESTYPE, value) a_rawint = heaptracker.adr2int(llmemory.cast_ptr_to_adr(a)) - x = cpu.bh_getarrayitem_raw_i(a_rawint, 3, descrarray) + x = cpu.bh_getarrayitem_raw_i(a_rawint, 3, descrarray, pure=False) + assert x == expected, ( + "%r: got %r, expected %r" % (RESTYPE, x, expected)) + x = cpu.bh_getarrayitem_raw_i(a_rawint, 3, descrarray, pure=True) assert x == expected, ( "%r: got %r, expected %r" % (RESTYPE, x, expected)) 
lltype.free(a, flavor='raw') diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -84,24 +84,39 @@ if condbox.getint(): do_call(cpu, metainterp, argboxes[1:], descr) -def do_getarrayitem_gc(cpu, _, arraybox, indexbox, arraydescr): +def _do_getarrayitem_gc(cpu, pure, arraybox, indexbox, arraydescr): array = arraybox.getref_base() index = indexbox.getint() if arraydescr.is_array_of_pointers(): - return BoxPtr(cpu.bh_getarrayitem_gc_r(array, index, arraydescr)) + return BoxPtr(cpu.bh_getarrayitem_gc_r(array, index, arraydescr, pure)) elif arraydescr.is_array_of_floats(): - return BoxFloat(cpu.bh_getarrayitem_gc_f(array, index, arraydescr)) + return BoxFloat(cpu.bh_getarrayitem_gc_f(array, index, arraydescr, + pure)) else: - return BoxInt(cpu.bh_getarrayitem_gc_i(array, index, arraydescr)) + return BoxInt(cpu.bh_getarrayitem_gc_i(array, index, arraydescr, pure)) -def do_getarrayitem_raw(cpu, _, arraybox, indexbox, arraydescr): +def do_getarrayitem_gc(cpu, _, arraybox, indexbox, arraydescr): + return _do_getarrayitem_gc(cpu, False, arraybox, indexbox, arraydescr) + +def do_getarrayitem_gc_pure(cpu, _, arraybox, indexbox, arraydescr): + return _do_getarrayitem_gc(cpu, True, arraybox, indexbox, arraydescr) + +def _do_getarrayitem_raw(cpu, pure, arraybox, indexbox, arraydescr): array = arraybox.getint() index = indexbox.getint() assert not arraydescr.is_array_of_pointers() if arraydescr.is_array_of_floats(): - return BoxFloat(cpu.bh_getarrayitem_raw_f(array, index, arraydescr)) + return BoxFloat(cpu.bh_getarrayitem_raw_f(array, index, arraydescr, + pure)) else: - return BoxInt(cpu.bh_getarrayitem_raw_i(array, index, arraydescr)) + return BoxInt(cpu.bh_getarrayitem_raw_i(array, index, arraydescr, + pure)) + +def do_getarrayitem_raw(cpu, _, arraybox, indexbox, arraydescr): + return _do_getarrayitem_raw(cpu, False, arraybox, indexbox, arraydescr) + +def 
do_getarrayitem_raw_pure(cpu, _, arraybox, indexbox, arraydescr): + return _do_getarrayitem_raw(cpu, True, arraybox, indexbox, arraydescr) def do_setarrayitem_gc(cpu, _, arraybox, indexbox, itembox, arraydescr): array = arraybox.getref_base() @@ -147,24 +162,36 @@ else: cpu.bh_setinteriorfield_gc_i(array, index, valuebox.getint(), descr) -def do_getfield_gc(cpu, _, structbox, fielddescr): +def _do_getfield_gc(cpu, pure, structbox, fielddescr): struct = structbox.getref_base() if fielddescr.is_pointer_field(): - return BoxPtr(cpu.bh_getfield_gc_r(struct, fielddescr)) + return BoxPtr(cpu.bh_getfield_gc_r(struct, fielddescr, pure)) elif fielddescr.is_float_field(): - return BoxFloat(cpu.bh_getfield_gc_f(struct, fielddescr)) + return BoxFloat(cpu.bh_getfield_gc_f(struct, fielddescr, pure)) else: - return BoxInt(cpu.bh_getfield_gc_i(struct, fielddescr)) + return BoxInt(cpu.bh_getfield_gc_i(struct, fielddescr, pure)) -def do_getfield_raw(cpu, _, structbox, fielddescr): +def do_getfield_gc(cpu, _, structbox, fielddescr): + return _do_getfield_gc(cpu, False, structbox, fielddescr) + +def do_getfield_gc_pure(cpu, _, structbox, fielddescr): + return _do_getfield_gc(cpu, True, structbox, fielddescr) + +def _do_getfield_raw(cpu, pure, structbox, fielddescr): check_descr(fielddescr) struct = structbox.getint() if fielddescr.is_pointer_field(): - return BoxPtr(cpu.bh_getfield_raw_r(struct, fielddescr)) + return BoxPtr(cpu.bh_getfield_raw_r(struct, fielddescr, pure)) elif fielddescr.is_float_field(): - return BoxFloat(cpu.bh_getfield_raw_f(struct, fielddescr)) + return BoxFloat(cpu.bh_getfield_raw_f(struct, fielddescr, pure)) else: - return BoxInt(cpu.bh_getfield_raw_i(struct, fielddescr)) + return BoxInt(cpu.bh_getfield_raw_i(struct, fielddescr, pure)) + +def do_getfield_raw(cpu, _, structbox, fielddescr): + return _do_getfield_raw(cpu, False, structbox, fielddescr) + +def do_getfield_raw_pure(cpu, _, structbox, fielddescr): + return _do_getfield_raw(cpu, True, structbox, 
fielddescr) def do_setfield_gc(cpu, _, structbox, itembox, fielddescr): struct = structbox.getref_base() @@ -310,14 +337,6 @@ execute[value] = globals()[name] continue # - # Maybe the same without the _PURE suffix? - if key.endswith('_PURE'): - key = key[:-5] - name = 'do_' + key.lower() - if name in globals(): - execute[value] = globals()[name] - continue - # # If missing, fallback to the bhimpl_xxx() method of the # blackhole interpreter. This only works if there is a # method of the exact same name and it accepts simple @@ -338,6 +357,7 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.CALL_PURE, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, rop.CALL_MALLOC_GC, diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -699,7 +699,7 @@ p = rffi.cast(rffi.CArrayPtr(TVAL), p + ofs) p[0] = newvalue -def op_raw_load(TVAL, p, ofs): +def op_raw_load(TVAL, p, ofs, pure=False): from rpython.rtyper.lltypesystem import rffi p = rffi.cast(llmemory.Address, p) p = rffi.cast(rffi.CArrayPtr(TVAL), p + ofs) diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -1,6 +1,7 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable from rpython.translator.unsimplify import varoftype from rpython.rtyper.lltypesystem import lltype +from rpython.translator.stm.support import is_immutable READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) @@ -18,22 +19,6 @@ else: raise AssertionError(v) -def is_immutable(op): - if op.opname in ('getfield', 'setfield'): - STRUCT = op.args[0].concretetype.TO - return STRUCT._immutable_field(op.args[1].value) - if op.opname in ('getarrayitem', 'setarrayitem'): - ARRAY = op.args[0].concretetype.TO - return ARRAY._immutable_field() - if op.opname == 
'getinteriorfield': - OUTER = op.args[0].concretetype.TO - return OUTER._immutable_interiorfield(unwraplist(op.args[1:])) - if op.opname == 'setinteriorfield': - OUTER = op.args[0].concretetype.TO - return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) - if op.opname in ('raw_load', 'raw_store'): - return False - def insert_stm_read_barrier(transformer, graph): # We need to put enough 'stm_read' in the graph so that any diff --git a/rpython/translator/stm/support.py b/rpython/translator/stm/support.py --- a/rpython/translator/stm/support.py +++ b/rpython/translator/stm/support.py @@ -12,6 +12,8 @@ if op.opname == 'setinteriorfield': OUTER = op.args[0].concretetype.TO return OUTER._immutable_interiorfield(unwraplist(op.args[1:-1])) - if op.opname in ('raw_load', 'raw_store'): + if op.opname == 'raw_load': + return len(op.args) >= 3 and bool(op.args[2].value) + if op.opname == 'raw_store': return False raise AssertionError(op) From noreply at buildbot.pypy.org Mon May 12 21:57:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 May 2014 21:57:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140512195727.C1E9F1C14DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71475:f9b64722c5dc Date: 2014-05-12 20:08 +0200 http://bitbucket.org/pypy/pypy/changeset/f9b64722c5dc/ Log: Fix diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -10,15 +10,6 @@ def is_gc_ptr(T): return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' -def unwraplist(list_v): - for v in list_v: - if isinstance(v, Constant): - yield v.value - elif isinstance(v, Variable): - yield None # unknown - else: - raise AssertionError(v) - def insert_stm_read_barrier(transformer, graph): # We need to put enough 'stm_read' in the graph so that any diff --git a/rpython/translator/stm/support.py 
b/rpython/translator/stm/support.py --- a/rpython/translator/stm/support.py +++ b/rpython/translator/stm/support.py @@ -17,3 +17,12 @@ if op.opname == 'raw_store': return False raise AssertionError(op) + +def unwraplist(list_v): + for v in list_v: + if isinstance(v, Constant): + yield v.value + elif isinstance(v, Variable): + yield None # unknown + else: + raise AssertionError(v) From noreply at buildbot.pypy.org Mon May 12 21:57:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 May 2014 21:57:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Translation fixes and more tweaks Message-ID: <20140512195728.EDE711C14DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71476:8eaaea95de1a Date: 2014-05-12 21:56 +0200 http://bitbucket.org/pypy/pypy/changeset/8eaaea95de1a/ Log: Translation fixes and more tweaks diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -389,10 +389,16 @@ for STYPE, UTYPE, itemsize in unroll_basic_sizes: if size == itemsize: if sign: - val = llop.raw_load(STYPE, gcref, ofs, pure) + if pure: # raw_load's last arg should be constant + val = llop.raw_load(STYPE, gcref, ofs, True) + else: + val = llop.raw_load(STYPE, gcref, ofs) val = rffi.cast(lltype.Signed, val) else: - val = llop.raw_load(UTYPE, gcref, ofs, pure) + if pure: + val = llop.raw_load(UTYPE, gcref, ofs, True) + else: + val = llop.raw_load(UTYPE, gcref, ofs) val = rffi.cast(lltype.Signed, val) return val else: @@ -410,7 +416,10 @@ @specialize.argtype(1) def read_ref_at_mem(self, gcref, ofs, pure=False): - return llop.raw_load(llmemory.GCREF, gcref, ofs, pure) + if pure: + return llop.raw_load(llmemory.GCREF, gcref, ofs, True) + else: + return llop.raw_load(llmemory.GCREF, gcref, ofs) # non- at specialized: must only be called with llmemory.GCREF def write_ref_at_mem(self, gcref, ofs, newvalue): @@ -419,7 
+428,10 @@ @specialize.argtype(1) def read_float_at_mem(self, gcref, ofs, pure=False): - return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs, pure) + if pure: + return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs, True) + else: + return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs) @specialize.argtype(1) def write_float_at_mem(self, gcref, ofs, newvalue): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1174,9 +1174,15 @@ def bhimpl_getarrayitem_gc_f(cpu, array, index, arraydescr): return cpu.bh_getarrayitem_gc_f(array, index, arraydescr) - bhimpl_getarrayitem_gc_i_pure = bhimpl_getarrayitem_gc_i - bhimpl_getarrayitem_gc_r_pure = bhimpl_getarrayitem_gc_r - bhimpl_getarrayitem_gc_f_pure = bhimpl_getarrayitem_gc_f + @arguments("cpu", "r", "i", "d", returns="i") + def bhimpl_getarrayitem_gc_i_pure(cpu, array, index, arraydescr): + return cpu.bh_getarrayitem_gc_i(array, index, arraydescr, pure=True) + @arguments("cpu", "r", "i", "d", returns="r") + def bhimpl_getarrayitem_gc_r_pure(cpu, array, index, arraydescr): + return cpu.bh_getarrayitem_gc_r(array, index, arraydescr, pure=True) + @arguments("cpu", "r", "i", "d", returns="f") + def bhimpl_getarrayitem_gc_f_pure(cpu, array, index, arraydescr): + return cpu.bh_getarrayitem_gc_f(array, index, arraydescr, pure=True) @arguments("cpu", "i", "i", "d", returns="i") def bhimpl_getarrayitem_raw_i(cpu, array, index, arraydescr): @@ -1185,8 +1191,12 @@ def bhimpl_getarrayitem_raw_f(cpu, array, index, arraydescr): return cpu.bh_getarrayitem_raw_f(array, index, arraydescr) - bhimpl_getarrayitem_raw_i_pure = bhimpl_getarrayitem_raw_i - bhimpl_getarrayitem_raw_f_pure = bhimpl_getarrayitem_raw_f + @arguments("cpu", "i", "i", "d", returns="i") + def bhimpl_getarrayitem_raw_i_pure(cpu, array, index, arraydescr): + return cpu.bh_getarrayitem_raw_i(array, index, arraydescr, pure=True) + 
@arguments("cpu", "i", "i", "d", returns="f") + def bhimpl_getarrayitem_raw_f_pure(cpu, array, index, arraydescr): + return cpu.bh_getarrayitem_raw_f(array, index, arraydescr, pure=True) @arguments("cpu", "r", "i", "i", "d") def bhimpl_setarrayitem_gc_i(cpu, array, index, newvalue, arraydescr): @@ -1272,9 +1282,15 @@ def bhimpl_getfield_gc_f(cpu, struct, fielddescr): return cpu.bh_getfield_gc_f(struct, fielddescr) - bhimpl_getfield_gc_i_pure = bhimpl_getfield_gc_i - bhimpl_getfield_gc_r_pure = bhimpl_getfield_gc_r - bhimpl_getfield_gc_f_pure = bhimpl_getfield_gc_f + @arguments("cpu", "r", "d", returns="i") + def bhimpl_getfield_gc_i_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_gc_i(struct, fielddescr, pure=True) + @arguments("cpu", "r", "d", returns="r") + def bhimpl_getfield_gc_r_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_gc_r(struct, fielddescr, pure=True) + @arguments("cpu", "r", "d", returns="f") + def bhimpl_getfield_gc_f_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_gc_f(struct, fielddescr, pure=True) bhimpl_getfield_vable_i = bhimpl_getfield_gc_i bhimpl_getfield_vable_r = bhimpl_getfield_gc_r @@ -1287,17 +1303,19 @@ @arguments("cpu", "i", "d", returns="i") def bhimpl_getfield_raw_i(cpu, struct, fielddescr): return cpu.bh_getfield_raw_i(struct, fielddescr) - @arguments("cpu", "i", "d", returns="r") - def _bhimpl_getfield_raw_r(cpu, struct, fielddescr): - # only for 'getfield_raw_r_pure' - return cpu.bh_getfield_raw_r(struct, fielddescr) @arguments("cpu", "i", "d", returns="f") def bhimpl_getfield_raw_f(cpu, struct, fielddescr): return cpu.bh_getfield_raw_f(struct, fielddescr) - bhimpl_getfield_raw_i_pure = bhimpl_getfield_raw_i - bhimpl_getfield_raw_r_pure = _bhimpl_getfield_raw_r - bhimpl_getfield_raw_f_pure = bhimpl_getfield_raw_f + @arguments("cpu", "i", "d", returns="i") + def bhimpl_getfield_raw_i_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_raw_i(struct, fielddescr, pure=True) + @arguments("cpu", "i", "d", 
returns="r") + def bhimpl_getfield_raw_r_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_raw_r(struct, fielddescr, pure=True) + @arguments("cpu", "i", "d", returns="f") + def bhimpl_getfield_raw_f_pure(cpu, struct, fielddescr): + return cpu.bh_getfield_raw_f(struct, fielddescr, pure=True) @arguments("cpu", "r", "i", "d") def bhimpl_setfield_gc_i(cpu, struct, newvalue, fielddescr): diff --git a/rpython/translator/stm/support.py b/rpython/translator/stm/support.py --- a/rpython/translator/stm/support.py +++ b/rpython/translator/stm/support.py @@ -1,3 +1,4 @@ +from rpython.flowspace.model import Constant, Variable def is_immutable(op): if op.opname in ('getfield', 'setfield'): From noreply at buildbot.pypy.org Tue May 13 00:45:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 13 May 2014 00:45:02 +0200 (CEST) Subject: [pypy-commit] pypy default: remove tabs Message-ID: <20140512224502.E02F71C00B9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71477:328133895566 Date: 2014-05-12 18:44 -0400 http://bitbucket.org/pypy/pypy/changeset/328133895566/ Log: remove tabs diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -1,4 +1,3 @@ - import cffi, os ffi = cffi.FFI() @@ -16,8 +15,8 @@ void gdbm_close(void*); typedef struct { - char *dptr; - int dsize; + char *dptr; + int dsize; } datum; datum gdbm_fetch(void*, datum); @@ -48,14 +47,14 @@ class gdbm(object): ll_dbm = None - + def __init__(self, filename, iflags, mode): self.size = -1 res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) if not res: self._raise_from_errno() self.ll_dbm = res - + def close(self): if self.ll_dbm: lib.gdbm_close(self.ll_dbm) From noreply at buildbot.pypy.org Tue May 13 02:30:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 May 2014 02:30:12 +0200 (CEST) Subject: [pypy-commit] pypy default: kill unused generic_translate_operation Message-ID: 
<20140513003012.69C661D2D1A@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71478:d84e526e805a Date: 2014-05-13 01:29 +0100 http://bitbucket.org/pypy/pypy/changeset/d84e526e805a/ Log: kill unused generic_translate_operation diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -589,8 +589,6 @@ classdef = hop.s_result.classdef return rclass.rtype_new_instance(self, classdef, hop.llops) - generic_translate_operation = None - def default_translate_operation(self, hop): raise TyperError("unimplemented operation: '%s'" % hop.spaceop.opname) @@ -688,13 +686,8 @@ def dispatch(self): rtyper = self.rtyper - generic = rtyper.generic_translate_operation - if generic is not None: - res = generic(self) - if res is not None: - return res opname = self.forced_opname or self.spaceop.opname - translate_meth = getattr(rtyper, 'translate_op_'+opname, + translate_meth = getattr(rtyper, 'translate_op_' + opname, rtyper.default_translate_operation) return translate_meth(self) From noreply at buildbot.pypy.org Tue May 13 04:03:39 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 May 2014 04:03:39 +0200 (CEST) Subject: [pypy-commit] pypy default: kill rtyper_makekey_ex() Message-ID: <20140513020340.0CE641C14DC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71479:bfd362e5793e Date: 2014-05-13 03:03 +0100 http://bitbucket.org/pypy/pypy/changeset/bfd362e5793e/ Log: kill rtyper_makekey_ex() diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -104,7 +104,7 @@ return _rweakvaldict.WeakValueDictRepr(rtyper, rtyper.getrepr(self.s_key)) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): @@ -164,7 +164,7 @@ from rpython.rlib import _rweakkeydict return _rweakkeydict.WeakKeyDictRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): 
+ def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -220,8 +220,8 @@ from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) - def rtyper_makekey_ex(self, rtyper): - real_key = rtyper.makekey(self.s_real_obj) + def rtyper_makekey(self): + real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller _make_none_union("SomeControlledInstance", "obj.s_real_obj, obj.controller", globals()) diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -81,7 +81,7 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.StatResultRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def getattr(self, s_attr): @@ -115,7 +115,7 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.StatvfsResultRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def getattr(self, s_attr): diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -268,8 +268,8 @@ return EnumerateIteratorRepr(r_baseiter) return r_container.make_iterator_repr(*self.variant) - def rtyper_makekey_ex(self, rtyper): - return self.__class__, rtyper.makekey(self.s_container), self.variant + def rtyper_makekey(self): + return self.__class__, self.s_container.rtyper_makekey(), self.variant class __extend__(annmodel.SomeImpossibleValue): diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -18,9 +18,9 @@ def rtyper_makerepr(self, rtyper): return 
TupleRepr(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) - def rtyper_makekey_ex(self, rtyper): - keys = [rtyper.makekey(s_item) for s_item in self.items] - return tuple([self.__class__]+keys) + def rtyper_makekey(self): + keys = [s_item.rtyper_makekey() for s_item in self.items] + return tuple([self.__class__] + keys) _gen_eq_function_cache = {} diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -135,14 +135,9 @@ return key._as_ptr() raise KeyError(search) - def makekey(self, s_obj): - if hasattr(s_obj, "rtyper_makekey_ex"): - return s_obj.rtyper_makekey_ex(self) - return s_obj.rtyper_makekey() - def getrepr(self, s_obj): # s_objs are not hashable... try hard to find a unique key anyway - key = self.makekey(s_obj) + key = s_obj.rtyper_makekey() assert key[0] is s_obj.__class__ try: result = self.reprs[key] diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -22,9 +22,8 @@ annmodel.SomeInteger())) stup2 = annmodel.SomeTuple((annmodel.SomeString(), annmodel.SomeInteger())) - rtyper = RPythonTyper(annrpython.RPythonAnnotator(None)) - key1 = rtyper.makekey(stup1) - key2 = rtyper.makekey(stup2) + key1 = stup1.rtyper_makekey() + key2 = stup2.rtyper_makekey() assert key1 != key2 def test_simple(): From noreply at buildbot.pypy.org Tue May 13 10:43:29 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 10:43:29 +0200 (CEST) Subject: [pypy-commit] stmgc default: signal inevitable transaction to commit if we are waiting for it Message-ID: <20140513084329.8D36B1C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1206:3b302406acd8 Date: 2014-05-13 10:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/3b302406acd8/ Log: signal inevitable transaction to commit if we are waiting for it diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- 
a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -129,7 +129,8 @@ long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + if (other_pseg->transaction_state == TS_INEVITABLE) { if (tl_or_null_if_can_abort == NULL) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), @@ -141,6 +142,7 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + signal_other_to_commit_soon(other_pseg); change_timing_state_tl(tl_or_null_if_can_abort, STM_TIME_WAIT_INEVITABLE); cond_wait(C_INEVITABLE); @@ -264,7 +266,7 @@ static bool _safe_points_requested = false; #endif -static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) { assert(_has_mutex()); /* never overwrite abort signals or safepoint requests diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -38,3 +38,5 @@ static void committed_globally_unique_transaction(void); static bool pause_signalled, globally_unique_transaction; + +void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg); From noreply at buildbot.pypy.org Tue May 13 10:48:49 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 10:48:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc 3b302406acd8 Message-ID: <20140513084849.F34961C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71480:d5b96230865f Date: 2014-05-13 10:46 +0200 http://bitbucket.org/pypy/pypy/changeset/d5b96230865f/ Log: import stmgc 3b302406acd8 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -31f9797a356c +3b302406acd8 
diff --git a/rpython/translator/stm/src_stm/stm/contention.c b/rpython/translator/stm/src_stm/stm/contention.c --- a/rpython/translator/stm/src_stm/stm/contention.c +++ b/rpython/translator/stm/src_stm/stm/contention.c @@ -99,13 +99,14 @@ /************************************************************/ -static void contention_management(uint8_t other_segment_num, +static bool contention_management(uint8_t other_segment_num, enum contention_kind_e kind, object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); + bool others_may_have_run = false; if (must_abort()) abort_with_mutex(); @@ -153,6 +154,7 @@ if (contmgr.try_sleep && kind != WRITE_WRITE_CONTENTION && contmgr.other_pseg->safe_point != SP_WAIT_FOR_C_TRANSACTION_DONE) { + others_may_have_run = true; /* Sleep. - Not for write-write contentions, because we're not at a @@ -226,6 +228,7 @@ if (must_abort()) abort_with_mutex(); + others_may_have_run = true; dprintf(("contention: wait C_ABORTED...\n")); cond_wait(C_ABORTED); dprintf(("contention: done\n")); @@ -279,6 +282,7 @@ stmcb_commit_soon(); } } + return others_may_have_run; } static void write_write_contention_management(uintptr_t lock_idx, @@ -302,10 +306,10 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num, +static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); + return contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) diff --git a/rpython/translator/stm/src_stm/stm/contention.h b/rpython/translator/stm/src_stm/stm/contention.h --- a/rpython/translator/stm/src_stm/stm/contention.h +++ b/rpython/translator/stm/src_stm/stm/contention.h @@ -2,7 +2,7 @@ static void write_write_contention_management(uintptr_t lock_idx, object_t *obj); -static void 
write_read_contention_management(uint8_t other_segment_num, +static bool write_read_contention_management(uint8_t other_segment_num, object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -287,13 +287,15 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i, item); - - /* If we reach this point, we didn't abort, but maybe we - had to wait for the other thread to commit. If we - did, then we have to restart committing from our call - to synchronize_all_threads(). */ - return true; + if (write_read_contention_management(i, item)) { + /* If we reach this point, we didn't abort, but we + had to wait for the other thread to commit. If we + did, then we have to restart committing from our call + to synchronize_all_threads(). 
*/ + return true; + } + /* we aborted the other transaction without waiting, so + we can just continue */ } })); } @@ -502,6 +504,9 @@ /* the call to minor_collection() above leaves us with STM_TIME_BOOKKEEPING */ + /* synchronize overflow objects living in privatized pages */ + push_overflow_objects_from_privatized_pages(); + s_mutex_lock(); restart: @@ -521,11 +526,11 @@ STM_SEGMENT->jmpbuf_ptr = NULL; /* if a major collection is required, do it here */ - if (is_major_collection_requested()) + if (is_major_collection_requested()) { + int oldstate = change_timing_state(STM_TIME_MAJOR_GC); major_collection_now_at_safe_point(); - - /* synchronize overflow objects living in privatized pages */ - push_overflow_objects_from_privatized_pages(); + change_timing_state(oldstate); + } /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -130,7 +130,8 @@ long i; restart: for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + if (other_pseg->transaction_state == TS_INEVITABLE) { if (tl_or_null_if_can_abort == NULL) { /* handle this case like a contention: it will either abort us (not the other thread, which is inevitable), @@ -142,6 +143,7 @@ else { /* wait for stm_commit_transaction() to finish this inevitable transaction */ + signal_other_to_commit_soon(other_pseg); change_timing_state_tl(tl_or_null_if_can_abort, STM_TIME_WAIT_INEVITABLE); cond_wait(C_INEVITABLE); @@ -265,7 +267,7 @@ static bool _safe_points_requested = false; #endif -static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) { assert(_has_mutex()); /* never 
overwrite abort signals or safepoint requests diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h --- a/rpython/translator/stm/src_stm/stm/sync.h +++ b/rpython/translator/stm/src_stm/stm/sync.h @@ -39,3 +39,5 @@ static void committed_globally_unique_transaction(void); static bool pause_signalled, globally_unique_transaction; + +void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg); From noreply at buildbot.pypy.org Tue May 13 10:48:51 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 10:48:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Reduce the limit of inevitable transactions instead of setting it to 0. Message-ID: <20140513084851.396061C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71481:4c7f65787861 Date: 2014-05-13 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/4c7f65787861/ Log: Reduce the limit of inevitable transactions instead of setting it to 0. Depend a bit on stmcb_commit_soon() in order for other transactions to signal us in case we block them. diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -29,7 +29,7 @@ /* atomic */ pypy_stm_nursery_low_fill_mark_saved = 0; } else { - pypy_stm_nursery_low_fill_mark >>= 2; + pypy_stm_nursery_low_fill_mark = 0; } } @@ -101,12 +101,8 @@ } } -void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr, - volatile long *v_counter) +void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter) { - pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */ - _stm_start_transaction(&stm_thread_local, jmpbuf_ptr); - /* If v_counter==0, initialize 'pypy_stm_nursery_low_fill_mark' from the configured length limit. 
If v_counter>0, we did an abort, and we now configure 'pypy_stm_nursery_low_fill_mark' @@ -120,8 +116,7 @@ counter = _htm_info.retry_counter; limit = pypy_transaction_length >> counter; #else - counter = *v_counter; - *v_counter = counter + 1; + counter = v_counter; if (counter == 0) { limit = pypy_transaction_length; @@ -133,6 +128,17 @@ #endif pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; +} + +void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr, + volatile long *v_counter) +{ + pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */ + _stm_start_transaction(&stm_thread_local, jmpbuf_ptr); + + _pypy_stm_initialize_nursery_low_fill_mark(*v_counter); + *v_counter = *v_counter + 1; + pypy_stm_ready_atomic = 1; /* reset after abort */ } @@ -157,8 +163,6 @@ transaction. */ assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || - (pypy_stm_nursery_low_fill_mark == 0)); stm_commit_transaction(); @@ -195,8 +199,10 @@ //assert(pypy_stm_nursery_low_fill_mark != 0); assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); stm_commit_transaction(); - pypy_stm_nursery_low_fill_mark = 0; + stm_start_inevitable_transaction(&stm_thread_local); + _pypy_stm_initialize_nursery_low_fill_mark(0); + _pypy_stm_inev_state(); } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); @@ -206,8 +212,6 @@ } /* double-check */ if (pypy_stm_ready_atomic == 1) { - assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || - (pypy_stm_nursery_low_fill_mark == 0)); } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); @@ -219,14 +223,17 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack); } -static void _pypy_stm_inev_state(void) +void _pypy_stm_inev_state(void) { + /* Reduce the limit so that inevitable transactions are generally + shorter. We depend a bit on stmcb_commit_soon() in order for + other transactions to signal us in case we block them. 
*/ if (pypy_stm_ready_atomic == 1) { - pypy_stm_nursery_low_fill_mark = 0; + pypy_stm_nursery_low_fill_mark >>= 2; } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - pypy_stm_nursery_low_fill_mark_saved = 0; + pypy_stm_nursery_low_fill_mark_saved >>= 2; } } diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -23,6 +23,9 @@ void pypy_stm_register_thread_local(void); /* generated into stm_prebuilt.c */ void pypy_stm_unregister_thread_local(void); /* generated into stm_prebuilt.c */ +void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter); +void _pypy_stm_inev_state(void); + void _pypy_stm_become_inevitable(const char *); void pypy_stm_become_globally_unique_transaction(void); @@ -52,8 +55,9 @@ static inline void pypy_stm_start_inevitable_if_not_atomic(void) { if (pypy_stm_ready_atomic == 1) { int e = errno; - pypy_stm_nursery_low_fill_mark = 0; stm_start_inevitable_transaction(&stm_thread_local); + _pypy_stm_initialize_nursery_low_fill_mark(0); + _pypy_stm_inev_state(); errno = e; } } @@ -73,8 +77,6 @@ case 1: pypy_stm_nursery_low_fill_mark = pypy_stm_nursery_low_fill_mark_saved; assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - assert(!(STM_SEGMENT->jmpbuf_ptr == NULL) || - (pypy_stm_nursery_low_fill_mark == 0)); break; case 0: pypy_stm_ready_atomic = 1; From noreply at buildbot.pypy.org Tue May 13 11:19:26 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 May 2014 11:19:26 +0200 (CEST) Subject: [pypy-commit] pypy default: some tests and some fixes Message-ID: <20140513091926.232811C044C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71482:c003cd1b188d Date: 2014-05-13 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c003cd1b188d/ Log: some tests and some fixes diff --git a/lib-python/2.7/test/test_gdbm.py 
b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,18 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) def test_main(): run_unittest(TestGdbm) diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -29,6 +29,7 @@ datum gdbm_firstkey(void*); datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); char* gdbm_strerror(int); int gdbm_errno; @@ -50,7 +51,6 @@ ll_dbm = None def __init__(self, filename, iflags, mode): - self.size = -1 res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) if not res: self._raise_from_errno() @@ -89,7 +89,7 @@ drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) if not drec.dptr: raise KeyError(key) - res = ffi.string(drec.dptr, drec.size) + res = ffi.string(drec.dptr, drec.dsize) lib.free(drec.dptr) return res @@ -131,6 +131,10 @@ __del__ = close + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + def open(filename, flags='r', mode=0666): if flags[0] == 'r': iflags = lib.GDBM_READER From noreply at buildbot.pypy.org Tue May 13 11:19:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 May 2014 11:19:27 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140513091927.5C31C1C044C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71483:77c627036259 Date: 2014-05-13 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/77c627036259/ Log: merge diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -1,4 +1,3 @@ - import cffi, os ffi = 
cffi.FFI() @@ -16,8 +15,8 @@ void gdbm_close(void*); typedef struct { - char *dptr; - int dsize; + char *dptr; + int dsize; } datum; datum gdbm_fetch(void*, datum); @@ -49,13 +48,13 @@ class gdbm(object): ll_dbm = None - + def __init__(self, filename, iflags, mode): res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) if not res: self._raise_from_errno() self.ll_dbm = res - + def close(self): if self.ll_dbm: lib.gdbm_close(self.ll_dbm) diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -104,7 +104,7 @@ return _rweakvaldict.WeakValueDictRepr(rtyper, rtyper.getrepr(self.s_key)) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): @@ -164,7 +164,7 @@ from rpython.rlib import _rweakkeydict return _rweakkeydict.WeakKeyDictRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -220,8 +220,8 @@ from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) - def rtyper_makekey_ex(self, rtyper): - real_key = rtyper.makekey(self.s_real_obj) + def rtyper_makekey(self): + real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller _make_none_union("SomeControlledInstance", "obj.s_real_obj, obj.controller", globals()) diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -81,7 +81,7 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.StatResultRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def 
getattr(self, s_attr): @@ -115,7 +115,7 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.StatvfsResultRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def getattr(self, s_attr): diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -268,8 +268,8 @@ return EnumerateIteratorRepr(r_baseiter) return r_container.make_iterator_repr(*self.variant) - def rtyper_makekey_ex(self, rtyper): - return self.__class__, rtyper.makekey(self.s_container), self.variant + def rtyper_makekey(self): + return self.__class__, self.s_container.rtyper_makekey(), self.variant class __extend__(annmodel.SomeImpossibleValue): diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -18,9 +18,9 @@ def rtyper_makerepr(self, rtyper): return TupleRepr(rtyper, [rtyper.getrepr(s_item) for s_item in self.items]) - def rtyper_makekey_ex(self, rtyper): - keys = [rtyper.makekey(s_item) for s_item in self.items] - return tuple([self.__class__]+keys) + def rtyper_makekey(self): + keys = [s_item.rtyper_makekey() for s_item in self.items] + return tuple([self.__class__] + keys) _gen_eq_function_cache = {} diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -135,14 +135,9 @@ return key._as_ptr() raise KeyError(search) - def makekey(self, s_obj): - if hasattr(s_obj, "rtyper_makekey_ex"): - return s_obj.rtyper_makekey_ex(self) - return s_obj.rtyper_makekey() - def getrepr(self, s_obj): # s_objs are not hashable... 
try hard to find a unique key anyway - key = self.makekey(s_obj) + key = s_obj.rtyper_makekey() assert key[0] is s_obj.__class__ try: result = self.reprs[key] @@ -589,8 +584,6 @@ classdef = hop.s_result.classdef return rclass.rtype_new_instance(self, classdef, hop.llops) - generic_translate_operation = None - def default_translate_operation(self, hop): raise TyperError("unimplemented operation: '%s'" % hop.spaceop.opname) @@ -688,13 +681,8 @@ def dispatch(self): rtyper = self.rtyper - generic = rtyper.generic_translate_operation - if generic is not None: - res = generic(self) - if res is not None: - return res opname = self.forced_opname or self.spaceop.opname - translate_meth = getattr(rtyper, 'translate_op_'+opname, + translate_meth = getattr(rtyper, 'translate_op_' + opname, rtyper.default_translate_operation) return translate_meth(self) diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -22,9 +22,8 @@ annmodel.SomeInteger())) stup2 = annmodel.SomeTuple((annmodel.SomeString(), annmodel.SomeInteger())) - rtyper = RPythonTyper(annrpython.RPythonAnnotator(None)) - key1 = rtyper.makekey(stup1) - key2 = rtyper.makekey(stup2) + key1 = stup1.rtyper_makekey() + key2 = stup2.rtyper_makekey() assert key1 != key2 def test_simple(): From noreply at buildbot.pypy.org Tue May 13 11:24:01 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 May 2014 11:24:01 +0200 (CEST) Subject: [pypy-commit] pypy default: make it a clear error Message-ID: <20140513092401.793DB1C0320@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71484:769e23925b13 Date: 2014-05-13 11:23 +0200 http://bitbucket.org/pypy/pypy/changeset/769e23925b13/ Log: make it a clear error diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -44,6 +44,8 @@ pass def _fromstr(key): + if not isinstance(key, str): + raise 
TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} class gdbm(object): From noreply at buildbot.pypy.org Tue May 13 11:32:05 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 May 2014 11:32:05 +0200 (CEST) Subject: [pypy-commit] pypy default: null chars Message-ID: <20140513093205.3BB6C1C0320@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71485:d334c17b0e39 Date: 2014-05-13 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/d334c17b0e39/ Log: null chars diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -87,6 +87,17 @@ self.g = gdbm.open(filename, 'r') self.assertEquals(self.g['x'], 'x' * 10000) + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -90,7 +90,7 @@ drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) if not drec.dptr: raise KeyError(key) - res = ffi.string(drec.dptr, drec.dsize) + res = str(ffi.buffer(drec.dptr, drec.dsize)) lib.free(drec.dptr) return res @@ -99,7 +99,7 @@ l = [] key = lib.gdbm_firstkey(self.ll_dbm) while key.dptr: - l.append(ffi.string(key.dptr, key.dsize)) + l.append(str(ffi.buffer(key.dptr, key.dsize))) nextkey = lib.gdbm_nextkey(self.ll_dbm, key) lib.free(key.dptr) key = nextkey @@ -109,7 +109,7 @@ self._check_closed() key = lib.gdbm_firstkey(self.ll_dbm) if key.dptr: - res = ffi.string(key.dptr, key.dsize) + res = str(ffi.buffer(key.dptr, key.dsize)) lib.free(key.dptr) return res @@ -117,7 +117,7 @@ self._check_closed() key = 
lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) if key.dptr: - res = ffi.string(key.dptr, key.dsize) + res = str(ffi.buffer(key.dptr, key.dsize)) lib.free(key.dptr) return res From noreply at buildbot.pypy.org Tue May 13 13:20:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 13:20:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix logic in last commit Message-ID: <20140513112027.5A5361C0320@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71486:d95eff23a6ae Date: 2014-05-13 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/d95eff23a6ae/ Log: fix logic in last commit diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -228,12 +228,17 @@ /* Reduce the limit so that inevitable transactions are generally shorter. We depend a bit on stmcb_commit_soon() in order for other transactions to signal us in case we block them. 
*/ + uintptr_t t; if (pypy_stm_ready_atomic == 1) { - pypy_stm_nursery_low_fill_mark >>= 2; + t = pypy_stm_nursery_low_fill_mark; + t = _stm_nursery_start + (t - _stm_nursery_start) >> 2; + pypy_stm_nursery_low_fill_mark = t; } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - pypy_stm_nursery_low_fill_mark_saved >>= 2; + t = pypy_stm_nursery_low_fill_mark_saved; + t = _stm_nursery_start + (t - _stm_nursery_start) >> 2; + pypy_stm_nursery_low_fill_mark_saved = t; } } From noreply at buildbot.pypy.org Tue May 13 13:43:25 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 13:43:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: argh Message-ID: <20140513114325.580D81C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71487:aab4485a4943 Date: 2014-05-13 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/aab4485a4943/ Log: argh diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -231,13 +231,13 @@ uintptr_t t; if (pypy_stm_ready_atomic == 1) { t = pypy_stm_nursery_low_fill_mark; - t = _stm_nursery_start + (t - _stm_nursery_start) >> 2; + t = _stm_nursery_start + ((t - _stm_nursery_start) >> 2); pypy_stm_nursery_low_fill_mark = t; } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); t = pypy_stm_nursery_low_fill_mark_saved; - t = _stm_nursery_start + (t - _stm_nursery_start) >> 2; + t = _stm_nursery_start + ((t - _stm_nursery_start) >> 2); pypy_stm_nursery_low_fill_mark_saved = t; } } From noreply at buildbot.pypy.org Tue May 13 14:10:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 14:10:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix to really not become inevitable before pure raw_loads (hopefully not breaking anything) Message-ID: <20140513121053.1DC9B1C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi 
Meier Branch: stmgc-c7 Changeset: r71488:2eecc075280f Date: 2014-05-13 14:10 +0200 http://bitbucket.org/pypy/pypy/changeset/2eecc075280f/ Log: fix to really not become inevitable before pure raw_loads (hopefully not breaking anything) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -1009,7 +1009,7 @@ op_raw_memmove = op_raw_memcopy # this is essentially the same here - def op_raw_load(self, RESTYPE, addr, offset): + def op_raw_load(self, RESTYPE, addr, offset, pure=False): checkadr(addr) if isinstance(offset, int): from rpython.rtyper.lltypesystem import rffi diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -43,13 +43,13 @@ # and it doesn't use the hint 'stm_dont_track_raw_accesses', then they # turn inevitable. TYPE = op.args[0].concretetype + if is_immutable(op): + return False if not isinstance(TYPE, lltype.Ptr): return True # raw_load or raw_store with a number or address S = TYPE.TO if S._gckind == 'gc': return False - if is_immutable(op): - return False if S._hints.get('stm_dont_track_raw_accesses', False): return False return not fresh_mallocs.is_fresh_malloc(op.args[0]) diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -255,3 +255,27 @@ res = self.interpret_inevitable(f, [2]) assert res is None + + def test_raw_load_nonpure(self): + X = lltype.Struct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 42 + + def f1(): + return llop.raw_load( + lltype.Signed, llmemory.cast_ptr_to_adr(x1), 0, False) + + res = self.interpret_inevitable(f1, []) + assert res == 'raw_load' + + def test_raw_load_pure(self): + X = lltype.Struct('X', ('foo', lltype.Signed)) + 
x1 = lltype.malloc(X, immortal=True) + x1.foo = 42 + + def f1(): + return llop.raw_load( + lltype.Signed, llmemory.cast_ptr_to_adr(x1), 0, True) + + res = self.interpret_inevitable(f1, []) + assert res is None From noreply at buildbot.pypy.org Tue May 13 17:53:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 May 2014 17:53:13 +0200 (CEST) Subject: [pypy-commit] stmgc timelog: Close this branch, made obsolete with the "marker" branch. Message-ID: <20140513155313.EDA151C0320@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: timelog Changeset: r1207:13446f46ad66 Date: 2014-05-13 16:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/13446f46ad66/ Log: Close this branch, made obsolete with the "marker" branch. From noreply at buildbot.pypy.org Tue May 13 17:53:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 May 2014 17:53:15 +0200 (CEST) Subject: [pypy-commit] stmgc gc-small-uniform: hg merge default Message-ID: <20140513155315.830001C0320@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: gc-small-uniform Changeset: r1208:7cc0f05c1049 Date: 2014-05-13 16:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/7cc0f05c1049/ Log: hg merge default diff too long, truncating to 2000 out of 2212 lines diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -11,3 +11,8 @@ - fork() is done by copying the whole mmap non-lazily; improve. 
- contention.c: when pausing: should also tell other_pseg "please commit soon" + +- resharing: remap_file_pages on multiple pages at once; and madvise() + the unused pages away --- or maybe use consecutive addresses from the + lowest ones from segment N, instead of the page corresponding to the page + number in segment 0 (possibly a bit messy) diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -44,6 +44,16 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} + +static void expand_marker(char *base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize) +{ + assert(following_object == NULL); + snprintf(outputbuf, outputbufsize, "<%p %lu>", base, odd_number); +} + nodeptr_t global_chained_list; @@ -88,6 +98,18 @@ STM_START_TRANSACTION(&stm_thread_local, here); + if (stm_thread_local.longest_marker_state != 0) { + fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", + &stm_thread_local, + stm_thread_local.longest_marker_state, + stm_thread_local.longest_marker_time); + fprintf(stderr, "\tself:\t\"%s\"\n\tother:\t\"%s\"\n", + stm_thread_local.longest_marker_self, + stm_thread_local.longest_marker_other); + stm_thread_local.longest_marker_state = 0; + stm_thread_local.longest_marker_time = 0.0; + } + nodeptr_t prev = initial; stm_read((objptr_t)prev); @@ -194,15 +216,24 @@ { int status; stm_register_thread_local(&stm_thread_local); + char *org = (char *)stm_thread_local.shadowstack; STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ + int loops = 0; + while (check_sorted() == -1) { + + STM_PUSH_MARKER(stm_thread_local, 2 * loops + 1, NULL); + bubble_run(); + + STM_POP_MARKER(stm_thread_local); + loops++; } STM_POP_ROOT(stm_thread_local, global_chained_list); - assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); unregister_thread_local(); status = 
sem_post(&done); assert(status == 0); @@ -245,6 +276,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + stmcb_expand_marker = expand_marker; setup_list(); diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c --- a/c7/demo/demo_largemalloc.c +++ b/c7/demo/demo_largemalloc.c @@ -23,6 +23,8 @@ abort(); } +void stmcb_commit_soon() {} + /************************************************************/ #define ARENA_SIZE (1024*1024*1024) diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -79,6 +79,8 @@ assert(n->next == *last_next); } +void stmcb_commit_soon() {} + int get_rand(int max) { if (max == 0) diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -39,6 +39,8 @@ visit((object_t **)&n->next); } +void stmcb_commit_soon() {} + static sem_t done; @@ -50,6 +52,7 @@ { int status; stm_register_thread_local(&stm_thread_local); + char *org = (char *)stm_thread_local.shadowstack; tl_counter = 0; object_t *tmp; @@ -65,7 +68,7 @@ i++; } - assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + assert(org == (char *)stm_thread_local.shadowstack); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); diff --git a/c7/doc/marker.txt b/c7/doc/marker.txt new file mode 100644 --- /dev/null +++ b/c7/doc/marker.txt @@ -0,0 +1,42 @@ + +Reports +======= + +- self-abort: + WRITE_WRITE_CONTENTION, INEVITABLE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + marker pointing back to the write, time lost by this thread + +- aborted by a different thread: + WRITE_WRITE_CONTENTION: + marker in both threads, time lost by this thread + WRITE_READ_CONTENTION: + remote marker pointing back to the write, time lost by this thread + (no local marker available to know where we've read the object from) + INEVITABLE_CONTENTION: + n/a + +- 
self-pausing: + same as self-abort, but reporting the time lost by pausing + +- waiting for a free segment: + - if we're waiting because of inevitability, report with a + marker and the time lost + - if we're just waiting because of no free segment, don't report it, + or maybe with only the total time lost and no marker + +- more internal reasons for cond_wait(), like synchronizing the threads, + should all be resolved quickly and are unlikely worth a report + + +Internal Measurements +===================== + +- use clock_gettime(CLOCK_MONOTONIC), it seems to be the fastest way + (less than 5 times slower than a RDTSC instruction, which is itself + not safe in the presence of threads migrating among CPUs) + +- record only the highest-time entry. The user of the library is + responsible for getting and clearing it often enough if it wants + more details. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -99,7 +99,8 @@ static void contention_management(uint8_t other_segment_num, - enum contention_kind_e kind) + enum contention_kind_e kind, + object_t *obj) { assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); @@ -161,10 +162,12 @@ itself already paused here. 
*/ contmgr.other_pseg->signal_when_done = true; + marker_contention(kind, false, other_segment_num, obj); change_timing_state(wait_category); - /* XXX should also tell other_pseg "please commit soon" */ + /* tell the other to commit ASAP */ + signal_other_to_commit_soon(contmgr.other_pseg); dprintf(("pausing...\n")); cond_signal(C_AT_SAFE_POINT); @@ -176,12 +179,22 @@ if (must_abort()) abort_with_mutex(); - change_timing_state(STM_TIME_RUN_CURRENT); + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + double elapsed = + change_timing_state_tl(pseg->pub.running_thread, + STM_TIME_RUN_CURRENT); + marker_copy(pseg->pub.running_thread, pseg, + wait_category, elapsed); } else if (!contmgr.abort_other) { + /* tell the other to commit ASAP, since it causes aborts */ + signal_other_to_commit_soon(contmgr.other_pseg); + dprintf(("abort in contention\n")); STM_SEGMENT->nursery_end = abort_category; + marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); } @@ -189,6 +202,7 @@ /* We have to signal the other thread to abort, and wait until it does. 
*/ contmgr.other_pseg->pub.nursery_end = abort_category; + marker_contention(kind, true, other_segment_num, obj); int sp = contmgr.other_pseg->safe_point; switch (sp) { @@ -256,10 +270,18 @@ abort_data_structures_from_segment_num(other_segment_num); } dprintf(("killed other thread\n")); + + /* we should commit soon, we caused an abort */ + //signal_other_to_commit_soon(get_priv_segment(STM_SEGMENT->segment_num)); + if (!STM_PSEGMENT->signalled_to_commit_soon) { + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + } } } -static void write_write_contention_management(uintptr_t lock_idx) +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj) { s_mutex_lock(); @@ -270,7 +292,7 @@ assert(get_priv_segment(other_segment_num)->write_lock_num == prev_owner); - contention_management(other_segment_num, WRITE_WRITE_CONTENTION); + contention_management(other_segment_num, WRITE_WRITE_CONTENTION, obj); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ @@ -279,12 +301,13 @@ s_mutex_unlock(); } -static void write_read_contention_management(uint8_t other_segment_num) +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj) { - contention_management(other_segment_num, WRITE_READ_CONTENTION); + contention_management(other_segment_num, WRITE_READ_CONTENTION, obj); } static void inevitable_contention_management(uint8_t other_segment_num) { - contention_management(other_segment_num, INEVITABLE_CONTENTION); + contention_management(other_segment_num, INEVITABLE_CONTENTION, NULL); } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,10 +1,13 @@ -static void write_write_contention_management(uintptr_t lock_idx); -static void write_read_contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx, + object_t *obj); +static void write_read_contention_management(uint8_t other_segment_num, + object_t *obj); static void inevitable_contention_management(uint8_t other_segment_num); static inline bool is_abort(uintptr_t nursery_end) { - return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE); + return (nursery_end <= _STM_NSE_SIGNAL_MAX && nursery_end != NSE_SIGPAUSE + && nursery_end != NSE_SIGCOMMITSOON); } static inline bool is_aborting_now(uint8_t other_segment_num) { diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -14,13 +14,10 @@ #define EVENTUALLY(condition) \ { \ if (!(condition)) { \ - int _i; \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_acquire(lock_pages_privatizing[_i]); \ + acquire_privatization_lock(); \ if (!(condition)) \ stm_fatalerror("fails: " #condition); \ - for (_i = 1; _i <= NB_SEGMENTS; _i++) \ - spinlock_release(lock_pages_privatizing[_i]); \ + release_privatization_lock(); \ } \ } #endif @@ -76,9 +73,15 @@ assert(lock_idx < sizeof(write_locks)); retry: if (write_locks[lock_idx] == 0) { + /* 
A lock to prevent reading garbage from + lookup_other_thread_recorded_marker() */ + acquire_marker_lock(STM_SEGMENT->segment_base); + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], - 0, lock_num))) + 0, lock_num))) { + release_marker_lock(STM_SEGMENT->segment_base); goto retry; + } dprintf_test(("write_slowpath %p -> mod_old\n", obj)); @@ -86,6 +89,15 @@ Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + /* Add the current marker, recording where we wrote to this object */ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->modified_old_objects_markers = + list_append2(STM_PSEGMENT->modified_old_objects_markers, + marker[0], marker[1]); + + release_marker_lock(STM_SEGMENT->segment_base); + /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ @@ -127,7 +139,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx); + write_write_contention_management(lock_idx, obj); goto retry; } @@ -195,7 +207,13 @@ assert(STM_PSEGMENT->transaction_state == TS_NONE); change_timing_state(STM_TIME_RUN_CURRENT); STM_PSEGMENT->start_time = tl->_timing_cur_start; + STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; +#ifndef NDEBUG + STM_PSEGMENT->marker_inev[1] = 99999999999999999L; +#endif + if (jmpbuf == NULL) + marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? 
TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; @@ -223,12 +241,17 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(list_is_empty(STM_PSEGMENT->modified_old_objects_markers)); assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); +#ifndef NDEBUG + /* this should not be used when objects_pointing_to_nursery == NULL */ + STM_PSEGMENT->modified_old_objects_markers_num_old = 99999999999999999L; +#endif check_nursery_at_transaction_start(); } @@ -263,7 +286,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - write_read_contention_management(i); + write_read_contention_management(i, item); /* If we reach this point, we didn't abort, but maybe we had to wait for the other thread to commit. If we @@ -359,12 +382,15 @@ It is first copied into the shared pages, and then into other segments' own private pages. (The second part might be done later; call synchronize_objects_flush() to flush this queue.) + + Must be called with the privatization lock acquired. 
*/ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); ssize_t obj_size = stmcb_size_rounded_up( (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); OPT_ASSERT(obj_size >= 16); + assert(STM_PSEGMENT->privatization_lock == 1); if (LIKELY(is_small_uniform(obj))) { _synchronize_fragment((stm_char *)obj, obj_size); @@ -444,13 +470,16 @@ if (STM_PSEGMENT->large_overflow_objects == NULL) return; + acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, synchronize_object_enqueue(item)); synchronize_objects_flush(); + release_privatization_lock(); } static void push_modified_to_other_segments(void) { + acquire_privatization_lock(); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, @@ -470,9 +499,11 @@ private pages as needed */ synchronize_object_enqueue(item); })); + release_privatization_lock(); synchronize_objects_flush(); list_clear(STM_PSEGMENT->modified_old_objects); + list_clear(STM_PSEGMENT->modified_old_objects_markers); } static void _finish_transaction(int attribute_to) @@ -614,6 +645,7 @@ })); list_clear(pseg->modified_old_objects); + list_clear(pseg->modified_old_objects_markers); } static void abort_data_structures_from_segment_num(int segment_num) @@ -638,6 +670,10 @@ (int)pseg->transaction_state); } + /* if we don't have marker information already, look up and preserve + the marker information from the shadowstack as a string */ + marker_default_for_abort(pseg); + /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); @@ -648,6 +684,7 @@ value before the transaction start */ stm_thread_local_t *tl = pseg->pub.running_thread; assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); + pseg->shadowstack_at_abort = tl->shadowstack; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; @@ 
-719,6 +756,7 @@ if (STM_PSEGMENT->transaction_state == TS_REGULAR) { dprintf(("become_inevitable: %s\n", msg)); + marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = NULL; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -75,9 +75,17 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. */ + that need to be copied to other segments upon commit. Note that + every object takes three list items: the object, and two words for + the location marker. */ struct list_s *modified_old_objects; + /* For each entry in 'modified_old_objects', we have two entries + in the following list, which give the marker at the time we added + the entry to modified_old_objects. */ + struct list_s *modified_old_objects_markers; + uintptr_t modified_old_objects_markers_num_old; + /* List of out-of-nursery objects that may contain pointers to nursery objects. This is used to track the GC status: they are all objects outside the nursery on which an stm_write() occurred @@ -145,10 +153,30 @@ /* For sleeping contention management */ bool signal_when_done; + /* This lock is acquired when that segment calls synchronize_object_now. + On the rare event of a page_privatize(), the latter will acquire + all the locks in all segments. Otherwise, for the common case, + it's cheap. (The set of all 'privatization_lock' in all segments + works like one single read-write lock, with page_privatize() acquiring + the write lock; but this variant is more efficient for the case of + many reads / rare writes.) */ + uint8_t privatization_lock; + + /* This lock is acquired when we mutate 'modified_old_objects' but + we don't have the global mutex. It is also acquired during minor + collection. 
It protects against a different thread that tries to + get this segment's marker corresponding to some object, or to + expand the marker into a full description. */ + uint8_t marker_lock; + /* In case of abort, we restore the 'shadowstack' field and the 'thread_local_obj' field. */ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; + struct stm_shadowentry_s *shadowstack_at_abort; + + /* Already signalled to commit soon: */ + bool signalled_to_commit_soon; /* For debugging */ #ifndef NDEBUG @@ -163,6 +191,11 @@ stm_char *sq_fragments[SYNC_QUEUE_SIZE]; int sq_fragsizes[SYNC_QUEUE_SIZE]; int sq_len; + + /* Temporarily stores the marker information */ + char marker_self[_STM_MARKER_LEN]; + char marker_other[_STM_MARKER_LEN]; + uintptr_t marker_inev[2]; /* marker where this thread became inevitable */ }; enum /* safe_point */ { @@ -185,6 +218,7 @@ static #endif char *stm_object_pages; +static int stm_object_pages_fd; static stm_thread_local_t *stm_all_thread_locals = NULL; static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; @@ -236,3 +270,31 @@ static void copy_object_to_shared(object_t *obj, int source_segment_num); static void synchronize_object_enqueue(object_t *obj); static void synchronize_objects_flush(void); + +static inline void acquire_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_acquire(*lock); +} + +static inline void release_privatization_lock(void) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->privatization_lock); + spinlock_release(*lock); +} + +static inline void acquire_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + &STM_PSEGMENT->marker_lock); + spinlock_acquire(*lock); +} + +static inline void release_marker_lock(char *segment_base) +{ + uint8_t *lock = (uint8_t *)REAL_ADDRESS(segment_base, + 
&STM_PSEGMENT->marker_lock); + spinlock_release(*lock); +} diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -8,14 +8,10 @@ static char *fork_big_copy = NULL; +static int fork_big_copy_fd; static stm_thread_local_t *fork_this_tl; static bool fork_was_in_transaction; -static char *setup_mmap(char *reason); /* forward, in setup.c */ -static void setup_protection_settings(void); /* forward, in setup.c */ -static pthread_t *_get_cpth(stm_thread_local_t *);/* forward, in setup.c */ - - static bool page_is_null(char *p) { long *q = (long *)p; @@ -74,7 +70,8 @@ /* Make a new mmap at some other address, but of the same size as the standard mmap at stm_object_pages */ - char *big_copy = setup_mmap("stmgc's fork support"); + int big_copy_fd; + char *big_copy = setup_mmap("stmgc's fork support", &big_copy_fd); /* Copy each of the segment infos into the new mmap, nurseries, and associated read markers @@ -139,6 +136,7 @@ assert(fork_big_copy == NULL); fork_big_copy = big_copy; + fork_big_copy_fd = big_copy_fd; fork_this_tl = this_tl; fork_was_in_transaction = was_in_transaction; @@ -163,6 +161,7 @@ assert(fork_big_copy != NULL); munmap(fork_big_copy, TOTAL_MEMORY); fork_big_copy = NULL; + close_fd_mmap(fork_big_copy_fd); bool was_in_transaction = fork_was_in_transaction; s_mutex_unlock(); @@ -214,6 +213,8 @@ if (res != stm_object_pages) stm_fatalerror("after fork: mremap failed: %m"); fork_big_copy = NULL; + close_fd_mmap(stm_object_pages_fd); + stm_object_pages_fd = fork_big_copy_fd; /* Unregister all other stm_thread_local_t, mostly as a way to free the memory used by the shadowstacks diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -49,17 +49,20 @@ /* uncommon case: need to initialize some more pages */ spinlock_acquire(lock_growth_large); - if (addr + size > uninitialized_page_start) { + char *start = uninitialized_page_start; + if (addr + size > start) { uintptr_t 
npages; - npages = (addr + size - uninitialized_page_start) / 4096UL; + npages = (addr + size - start) / 4096UL; npages += GCPAGE_NUM_PAGES; - if (uninitialized_page_stop - uninitialized_page_start < - npages * 4096UL) { + if (uninitialized_page_stop - start < npages * 4096UL) { stm_fatalerror("out of memory!"); /* XXX */ } - setup_N_pages(uninitialized_page_start, npages); - __sync_synchronize(); - uninitialized_page_start += npages * 4096UL; + setup_N_pages(start, npages); + if (!__sync_bool_compare_and_swap(&uninitialized_page_start, + start, + start + npages * 4096UL)) { + stm_fatalerror("uninitialized_page_start changed?"); + } } spinlock_release(lock_growth_large); return addr; @@ -336,8 +339,8 @@ struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; while (current-- != base) { - assert(current->ss != (object_t *)-1); - mark_visit_object(current->ss, segment_base); + if ((((uintptr_t)current->ss) & 3) == 0) + mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); @@ -375,6 +378,23 @@ } } +static void mark_visit_from_markers(void) +{ + long j; + for (j = 1; j <= NB_SEGMENTS; j++) { + char *base = get_segment_base(j); + struct list_s *lst = get_priv_segment(j)->modified_old_objects_markers; + uintptr_t i; + for (i = list_count(lst); i > 0; i -= 2) { + mark_visit_object((object_t *)list_item(lst, i - 1), base); + } + if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; + mark_visit_object((object_t *)marker_inev_obj, base); + } + } +} + static void clean_up_segment_lists(void) { long i; @@ -477,6 +497,7 @@ /* marking */ LIST_CREATE(mark_objects_to_trace); mark_visit_from_modified_objects(); + mark_visit_from_markers(); mark_visit_from_roots(); LIST_FREE(mark_objects_to_trace); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c 
@@ -353,6 +353,9 @@ mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; increment_total_allocated(request_size + LARGE_MALLOC_OVERHEAD); +#ifndef NDEBUG + memset((char *)&mscan->d, 0xda, request_size); +#endif lm_unlock(); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -33,6 +33,18 @@ #define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) +static inline struct list_s *list_append2(struct list_s *lst, + uintptr_t item0, uintptr_t item1) +{ + uintptr_t index = lst->count; + lst->count += 2; + if (UNLIKELY(index >= lst->last_allocated)) + lst = _list_grow(lst, index + 1); + lst->items[index + 0] = item0; + lst->items[index + 1] = item1; + return lst; +} + static inline void list_clear(struct list_s *lst) { @@ -66,6 +78,11 @@ lst->items[index] = newitem; } +static inline uintptr_t *list_ptr_to_item(struct list_s *lst, uintptr_t index) +{ + return &lst->items[index]; +} + #define LIST_FOREACH_R(lst, TYPE, CODE) \ do { \ struct list_s *_lst = (lst); \ diff --git a/c7/stm/marker.c b/c7/stm/marker.c new file mode 100644 --- /dev/null +++ b/c7/stm/marker.c @@ -0,0 +1,198 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); + +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + + +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]) +{ + /* fetch the current marker from the tl's shadow stack, + and return it in 'marker[2]'. */ + struct stm_shadowentry_s *current = tl->shadowstack - 1; + struct stm_shadowentry_s *base = tl->shadowstack_base; + + /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is + a convenient stopper for the loop below but which shouldn't + be returned. 
*/ + assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + + while (!(((uintptr_t)current->ss) & 1)) { + current--; + assert(current >= base); + } + if (current != base) { + /* found the odd marker */ + marker[0] = (uintptr_t)current[0].ss; + marker[1] = (uintptr_t)current[1].ss; + } + else { + /* no marker found */ + marker[0] = 0; + marker[1] = 0; + } +} + +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker) +{ + /* Expand the marker given by 'marker[2]' into a full string. This + works assuming that the marker was produced inside the segment + given by 'segment_base'. If that's from a different thread, you + must first acquire the corresponding 'marker_lock'. */ + assert(_has_mutex()); + outmarker[0] = 0; + if (marker[0] == 0) + return; /* no marker entry found */ + if (stmcb_expand_marker != NULL) { + stmcb_expand_marker(segment_base, marker[0], (object_t *)marker[1], + outmarker, _STM_MARKER_LEN); + } +} + +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg) +{ + if (pseg->marker_self[0] != 0) + return; /* already collected an entry */ + + uintptr_t marker[2]; + marker_fetch(pseg->pub.running_thread, marker); + marker_expand(marker, pseg->pub.segment_base, pseg->marker_self); + pseg->marker_other[0] = 0; +} + +char *_stm_expand_marker(void) +{ + /* for tests only! */ + static char _result[_STM_MARKER_LEN]; + uintptr_t marker[2]; + _result[0] = 0; + s_mutex_lock(); + marker_fetch(STM_SEGMENT->running_thread, marker); + marker_expand(marker, STM_SEGMENT->segment_base, _result); + s_mutex_unlock(); + return _result; +} + +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time) +{ + /* Copies the marker information from pseg to tl. This is called + indirectly from abort_with_mutex(), but only if the lost time is + greater than that of the previous recorded marker. 
By contrast, + pseg->marker_self has been filled already in all cases. The + reason for the two steps is that we must fill pseg->marker_self + earlier than now (some objects may be GCed), but we only know + here the total time it gets attributed. + */ + if (stmcb_debug_print) { + stmcb_debug_print(timer_names[attribute_to], time, pseg->marker_self); + } + if (time * 0.99 > tl->longest_marker_time) { + tl->longest_marker_state = attribute_to; + tl->longest_marker_time = time; + memcpy(tl->longest_marker_self, pseg->marker_self, _STM_MARKER_LEN); + memcpy(tl->longest_marker_other, pseg->marker_other, _STM_MARKER_LEN); + } + pseg->marker_self[0] = 0; + pseg->marker_other[0] = 0; +} + +static void marker_fetch_obj_write(uint8_t in_segment_num, object_t *obj, + uintptr_t marker[2]) +{ + assert(_has_mutex()); + + /* here, we acquired the other thread's marker_lock, which means that: + + (1) it has finished filling 'modified_old_objects' after it sets + up the write_locks[] value that we're conflicting with + + (2) it is not mutating 'modified_old_objects' right now (we have + the global mutex_lock at this point too). 
+ */ + long i; + struct stm_priv_segment_info_s *pseg = get_priv_segment(in_segment_num); + struct list_s *mlst = pseg->modified_old_objects; + struct list_s *mlstm = pseg->modified_old_objects_markers; + for (i = list_count(mlst); --i >= 0; ) { + if (list_item(mlst, i) == (uintptr_t)obj) { + assert(list_count(mlstm) == 2 * list_count(mlst)); + marker[0] = list_item(mlstm, i * 2 + 0); + marker[1] = list_item(mlstm, i * 2 + 1); + return; + } + } + marker[0] = 0; + marker[1] = 0; +} + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj) +{ + uintptr_t self_marker[2]; + uintptr_t other_marker[2]; + struct stm_priv_segment_info_s *my_pseg, *other_pseg; + + my_pseg = get_priv_segment(STM_SEGMENT->segment_num); + other_pseg = get_priv_segment(other_segment_num); + + char *my_segment_base = STM_SEGMENT->segment_base; + char *other_segment_base = get_segment_base(other_segment_num); + + acquire_marker_lock(other_segment_base); + + /* Collect the location for myself. It's usually the current + location, except in a write-read abort, in which case it's the + older location of the write. */ + if (kind == WRITE_READ_CONTENTION) + marker_fetch_obj_write(my_pseg->pub.segment_num, obj, self_marker); + else + marker_fetch(my_pseg->pub.running_thread, self_marker); + + /* Expand this location into either my_pseg->marker_self or + other_pseg->marker_other, depending on who aborts. */ + marker_expand(self_marker, my_segment_base, + abort_other ? other_pseg->marker_other + : my_pseg->marker_self); + + /* For some categories, we can also collect the relevant information + for the other segment. */ + char *outmarker = abort_other ? 
other_pseg->marker_self + : my_pseg->marker_other; + switch (kind) { + case WRITE_WRITE_CONTENTION: + marker_fetch_obj_write(other_segment_num, obj, other_marker); + marker_expand(other_marker, other_segment_base, outmarker); + break; + case INEVITABLE_CONTENTION: + assert(abort_other == false); + other_marker[0] = other_pseg->marker_inev[0]; + other_marker[1] = other_pseg->marker_inev[1]; + marker_expand(other_marker, other_segment_base, outmarker); + break; + case WRITE_READ_CONTENTION: + strcpy(outmarker, ""); + break; + default: + outmarker[0] = 0; + break; + } + + release_marker_lock(other_segment_base); +} + +static void marker_fetch_inev(void) +{ + uintptr_t marker[2]; + marker_fetch(STM_SEGMENT->running_thread, marker); + STM_PSEGMENT->marker_inev[0] = marker[0]; + STM_PSEGMENT->marker_inev[1] = marker[1]; +} diff --git a/c7/stm/marker.h b/c7/stm/marker.h new file mode 100644 --- /dev/null +++ b/c7/stm/marker.h @@ -0,0 +1,12 @@ + +static void marker_fetch(stm_thread_local_t *tl, uintptr_t marker[2]); +static void marker_fetch_inev(void); +static void marker_expand(uintptr_t marker[2], char *segment_base, + char *outmarker); +static void marker_default_for_abort(struct stm_priv_segment_info_s *pseg); +static void marker_copy(stm_thread_local_t *tl, + struct stm_priv_segment_info_s *pseg, + enum stm_time_e attribute_to, double time); + +static void marker_contention(int kind, bool abort_other, + uint8_t other_segment_num, object_t *obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -152,9 +152,29 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; struct stm_shadowentry_s *base = tl->shadowstack_base; - while (current-- != base) { - assert(current->ss != (object_t *)-1); - minor_trace_if_young(¤t->ss); + while (1) { + --current; + OPT_ASSERT(current >= base); + + uintptr_t x = (uintptr_t)current->ss; + + if ((x & 3) == 0) { + /* the stack entry is a 
regular pointer (possibly NULL) */ + minor_trace_if_young(¤t->ss); + } + else if (x == STM_STACK_MARKER_NEW) { + /* the marker was not already seen: mark it as seen, + but continue looking more deeply in the shadowstack */ + current->ss = (object_t *)STM_STACK_MARKER_OLD; + } + else if (x == STM_STACK_MARKER_OLD) { + /* the marker was already seen: we can stop the + root stack tracing at this point */ + break; + } + else { + /* it is an odd-valued marker, ignore */ + } } minor_trace_if_young(&tl->thread_local_obj); } @@ -184,6 +204,7 @@ _collect_now(obj); + XXX acquire_privatization_lock(); release_privatization_lock(); ? synchronize_object_enqueue(obj); /* the list could have moved while appending */ @@ -199,6 +220,24 @@ _collect_now(item)); } +static void collect_roots_from_markers(uintptr_t num_old) +{ + /* visit the marker objects */ + struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; + STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); + uintptr_t i, total = list_count(mlst); + assert((total & 1) == 0); + for (i = num_old + 1; i < total; i += 2) { + minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); + } + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + uintptr_t *pmarker_inev_obj = (uintptr_t *) + REAL_ADDRESS(STM_SEGMENT->segment_base, + &STM_PSEGMENT->marker_inev[1]); + minor_trace_if_young((object_t **)pmarker_inev_obj); + } +} + static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { /* reset the nursery by zeroing it */ @@ -207,6 +246,11 @@ realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start); nursery_used = pseg->pub.nursery_current - (stm_char *)_stm_nursery_start; + if (nursery_used > NB_NURSERY_PAGES * 4096) { + /* possible in rare cases when the program artificially advances + its own nursery_current */ + nursery_used = NB_NURSERY_PAGES * 4096; + } OPT_ASSERT((nursery_used & 7) == 0); memset(realnursery, 0, nursery_used); @@ -248,8 +292,16 @@ 
dprintf(("minor_collection commit=%d\n", (int)commit)); + acquire_marker_lock(STM_SEGMENT->segment_base); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (!commit) { + /* We should commit soon, probably. This is kind of a + workaround for the broken stm_should_break_transaction of + pypy that doesn't want to commit any more after a minor + collection. It may, however, always be a good idea... */ + stmcb_commit_soon(); + /* 'STM_PSEGMENT->overflow_number' is used now by this collection, in the sense that it's copied to the overflow objects */ STM_PSEGMENT->overflow_number_has_been_used = true; @@ -263,6 +315,7 @@ /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. */ + uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); @@ -272,11 +325,15 @@ into objects_pointing_to_nursery, but instead we use the following shortcut */ collect_modified_old_objects(); + num_old = 0; } else { + num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; abort(); // handle specially the objects_pointing_to_nursery already there } + collect_roots_from_markers(num_old); + collect_roots_in_nursery(); collect_oldrefs_to_nursery(); @@ -288,6 +345,8 @@ assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); + + release_marker_lock(STM_SEGMENT->segment_base); } static void minor_collection(bool commit) diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,6 +1,7 @@ /* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGxxx */ #define NSE_SIGPAUSE STM_TIME_WAIT_OTHER +#define NSE_SIGCOMMITSOON STM_TIME_SYNC_COMMIT_SOON static uint32_t highest_overflow_number; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -81,9 +81,18 @@ can only be remapped to 
page N in another segment */ assert(((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0); +#ifdef USE_REMAP_FILE_PAGES int res = remap_file_pages(addr, size, 0, pgoff, 0); if (UNLIKELY(res < 0)) stm_fatalerror("remap_file_pages: %m"); +#else + char *res = mmap(addr, size, + PROT_READ | PROT_WRITE, + (MAP_PAGES_FLAGS & ~MAP_ANONYMOUS) | MAP_FIXED, + stm_object_pages_fd, pgoff * 4096UL); + if (UNLIKELY(res != addr)) + stm_fatalerror("mmap (remapping page): %m"); +#endif } static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) @@ -108,18 +117,20 @@ { /* check this thread's 'pages_privatized' bit */ uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1); - struct page_shared_s *ps = &pages_privatized[pagenum - PAGE_FLAG_START]; + volatile struct page_shared_s *ps = (volatile struct page_shared_s *) + &pages_privatized[pagenum - PAGE_FLAG_START]; if (ps->by_segment & bitmask) { /* the page is already privatized; nothing to do */ return; } -#ifndef NDEBUG - spinlock_acquire(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + spinlock_acquire(get_priv_segment(i)->privatization_lock); + } /* add this thread's 'pages_privatized' bit */ - __sync_fetch_and_add(&ps->by_segment, bitmask); + ps->by_segment |= bitmask; /* "unmaps" the page to make the address space location correspond again to its underlying file offset (XXX later we should again @@ -133,9 +144,9 @@ /* copy the content from the shared (segment 0) source */ pagecopy(new_page, stm_object_pages + pagenum * 4096UL); -#ifndef NDEBUG - spinlock_release(lock_pages_privatizing[STM_SEGMENT->segment_num]); -#endif + for (i = NB_SEGMENTS; i >= 1; i--) { + spinlock_release(get_priv_segment(i)->privatization_lock); + } } static void _page_do_reshare(long segnum, uintptr_t pagenum) @@ -167,6 +178,7 @@ static void pages_setup_readmarkers_for_nursery(void) { +#ifdef USE_REMAP_FILE_PAGES /* The nursery page's read markers are never read, but 
must still be writeable. We'd like to map the pages to a general "trash page"; missing one, we remap all the pages over to the same one. @@ -185,4 +197,5 @@ /* errors here ignored */ } } +#endif } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -19,6 +19,8 @@ #define PAGE_FLAG_START END_NURSERY_PAGE #define PAGE_FLAG_END NB_PAGES +#define USE_REMAP_FILE_PAGES + struct page_shared_s { #if NB_SEGMENTS <= 8 uint8_t by_segment; @@ -34,20 +36,6 @@ }; static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START]; -/* Rules for concurrent access to this array, possibly with is_private_page(): - - - we clear bits only during major collection, when all threads are - synchronized anyway - - - we set only the bit corresponding to our segment number, using - an atomic addition; and we do it _before_ we actually make the - page private. - - - concurrently, other threads checking the bits might (rarely) - get the answer 'true' to is_private_page() even though it is not - actually private yet. This inconsistency is in the direction - that we want for synchronize_object_now(). 
-*/ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); static void page_privatize(uintptr_t pagenum); @@ -86,7 +74,3 @@ if (pages_privatized[pagenum - PAGE_FLAG_START].by_segment != 0) page_reshare(pagenum); } - -#ifndef NDEBUG -static char lock_pages_privatizing[NB_SEGMENTS + 1] = { 0 }; -#endif diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -3,7 +3,8 @@ #endif -static char *setup_mmap(char *reason) +#ifdef USE_REMAP_FILE_PAGES +static char *setup_mmap(char *reason, int *ignored) { char *result = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, @@ -13,6 +14,45 @@ return result; } +static void close_fd_mmap(int ignored) +{ +} +#else +#include /* For O_* constants */ +static char *setup_mmap(char *reason, int *map_fd) +{ + char name[128]; + sprintf(name, "/stmgc-c7-bigmem-%ld-%.18e", + (long)getpid(), get_stm_time()); + + /* Create the big shared memory object, and immediately unlink it. + There is a small window where if this process is killed the + object is left around. It doesn't seem possible to do anything + about it... 
+ */ + int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600); + shm_unlink(name); + + if (fd == -1) { + stm_fatalerror("%s failed (stm_open): %m", reason); + } + if (ftruncate(fd, TOTAL_MEMORY) != 0) { + stm_fatalerror("%s failed (ftruncate): %m", reason); + } + char *result = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS & ~MAP_ANONYMOUS, fd, 0); + if (result == MAP_FAILED) { + stm_fatalerror("%s failed (mmap): %m", reason); + } + *map_fd = fd; + return result; +} +static void close_fd_mmap(int map_fd) +{ + close(map_fd); +} +#endif static void setup_protection_settings(void) { @@ -56,7 +96,8 @@ (FIRST_READMARKER_PAGE * 4096UL)); assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096); - stm_object_pages = setup_mmap("initial stm_object_pages mmap()"); + stm_object_pages = setup_mmap("initial stm_object_pages mmap()", + &stm_object_pages_fd); setup_protection_settings(); long i; @@ -78,6 +119,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->modified_old_objects_markers = list_create(); pr->young_weakrefs = list_create(); pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); @@ -85,15 +127,16 @@ pr->callbacks_on_abort = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; + pr->pub.transaction_read_version = 0xff; } /* The pages are shared lazily, as remap_file_pages() takes a relatively long time for each page. - The read markers are initially zero, which is correct: - STM_SEGMENT->transaction_read_version never contains zero, - so a null read marker means "not read" whatever the - current transaction_read_version is. + The read markers are initially zero, but we set anyway + transaction_read_version to 0xff in order to force the first + transaction to "clear" the read markers by mapping a different, + private range of addresses. 
*/ setup_sync(); @@ -115,6 +158,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + list_free(pr->modified_old_objects_markers); list_free(pr->young_weakrefs); list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); @@ -124,6 +168,7 @@ munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; + close_fd_mmap(stm_object_pages_fd); teardown_core(); teardown_sync(); @@ -154,11 +199,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; + STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); } static void _done_shadow_stack(stm_thread_local_t *tl) { - assert(tl->shadowstack >= tl->shadowstack_base); + assert(tl->shadowstack > tl->shadowstack_base); + assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/c7/stm/setup.h b/c7/stm/setup.h new file mode 100644 --- /dev/null +++ b/c7/stm/setup.h @@ -0,0 +1,5 @@ + +static char *setup_mmap(char *reason, int *map_fd); +static void close_fd_mmap(int map_fd); +static void setup_protection_settings(void); +static pthread_t *_get_cpth(stm_thread_local_t *); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -2,6 +2,10 @@ #include #include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* Each segment can be in one of three possible states, described by the segment variable 'safe_point': @@ -260,6 +264,18 @@ static bool _safe_points_requested = false; #endif +static void signal_other_to_commit_soon(struct stm_priv_segment_info_s *other_pseg) +{ + assert(_has_mutex()); + /* never overwrite abort signals or safepoint requests + (too messy to deal with) */ + if (!other_pseg->signalled_to_commit_soon + && !is_abort(other_pseg->pub.nursery_end) + && !pause_signalled) { + other_pseg->pub.nursery_end = 
NSE_SIGCOMMITSOON; + } +} + static void signal_everybody_to_pause_running(void) { assert(_safe_points_requested == false); @@ -323,7 +339,21 @@ if (STM_SEGMENT->nursery_end == NURSERY_END) break; /* no safe point requested */ + if (STM_SEGMENT->nursery_end == NSE_SIGCOMMITSOON) { + if (previous_state == -1) { + previous_state = change_timing_state(STM_TIME_SYNC_COMMIT_SOON); + } + + STM_PSEGMENT->signalled_to_commit_soon = true; + stmcb_commit_soon(); + if (!pause_signalled) { + STM_SEGMENT->nursery_end = NURSERY_END; + break; + } + STM_SEGMENT->nursery_end = NSE_SIGPAUSE; + } assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE); + assert(pause_signalled); /* If we are requested to enter a safe-point, we cannot proceed now. Wait until the safe-point request is removed for us. */ diff --git a/c7/stm/timing.c b/c7/stm/timing.c --- a/c7/stm/timing.c +++ b/c7/stm/timing.c @@ -25,18 +25,26 @@ return oldstate; } -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate) +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate) { TIMING_CHANGE(tl, newstate); + return elasped; } static void timing_end_transaction(enum stm_time_e attribute_to) { stm_thread_local_t *tl = STM_SEGMENT->running_thread; TIMING_CHANGE(tl, STM_TIME_OUTSIDE_TRANSACTION); - add_timing(tl, attribute_to, tl->timing[STM_TIME_RUN_CURRENT]); + double time_this_transaction = tl->timing[STM_TIME_RUN_CURRENT]; + add_timing(tl, attribute_to, time_this_transaction); tl->timing[STM_TIME_RUN_CURRENT] = 0.0f; + + if (attribute_to != STM_TIME_RUN_COMMITTED) { + struct stm_priv_segment_info_s *pseg = + get_priv_segment(STM_SEGMENT->segment_num); + marker_copy(tl, pseg, attribute_to, time_this_transaction); + } } static const char *timer_names[] = { @@ -51,6 +59,7 @@ "wait write read", "wait inevitable", "wait other", + "sync commit soon", "bookkeeping", "minor gc", "major gc", @@ -70,9 +79,13 @@ s_mutex_lock(); fprintf(stderr, "thread %p:\n", tl); for (i = 
0; i < _STM_TIME_N; i++) { - fprintf(stderr, " %-24s %9u %.3f s\n", + fprintf(stderr, " %-24s %9u %8.3f s\n", timer_names[i], tl->events[i], (double)tl->timing[i]); } + fprintf(stderr, " %-24s %6s %11.6f s\n", + "longest recorded marker", "", tl->longest_marker_time); + fprintf(stderr, " \"%.*s\"\n", + (int)_STM_MARKER_LEN, tl->longest_marker_self); s_mutex_unlock(); } } diff --git a/c7/stm/timing.h b/c7/stm/timing.h --- a/c7/stm/timing.h +++ b/c7/stm/timing.h @@ -8,7 +8,7 @@ } static enum stm_time_e change_timing_state(enum stm_time_e newstate); -static void change_timing_state_tl(stm_thread_local_t *tl, - enum stm_time_e newstate); +static double change_timing_state_tl(stm_thread_local_t *tl, + enum stm_time_e newstate); static void timing_end_transaction(enum stm_time_e attribute_to); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -8,6 +8,7 @@ #include "stm/pages.h" #include "stm/gcpage.h" #include "stm/sync.h" +#include "stm/setup.h" #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" @@ -15,6 +16,7 @@ #include "stm/fprintcolor.h" #include "stm/weakref.h" #include "stm/timing.h" +#include "stm/marker.h" #include "stm/misc.c" #include "stm/list.c" @@ -35,3 +37,4 @@ #include "stm/fprintcolor.c" #include "stm/weakref.c" #include "stm/timing.c" +#include "stm/marker.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -66,6 +66,7 @@ STM_TIME_WAIT_WRITE_READ, STM_TIME_WAIT_INEVITABLE, STM_TIME_WAIT_OTHER, + STM_TIME_SYNC_COMMIT_SOON, STM_TIME_BOOKKEEPING, STM_TIME_MINOR_GC, STM_TIME_MAJOR_GC, @@ -73,6 +74,8 @@ _STM_TIME_N }; +#define _STM_MARKER_LEN 80 + typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; @@ -90,6 +93,11 @@ float timing[_STM_TIME_N]; double _timing_cur_start; enum stm_time_e _timing_cur_state; + /* the marker with the longest associated time so far */ + enum stm_time_e 
longest_marker_state; + double longest_marker_time; + char longest_marker_self[_STM_MARKER_LEN]; + char longest_marker_other[_STM_MARKER_LEN]; /* the next fields are handled internally by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; @@ -213,9 +221,13 @@ The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, by invoking the callback passed as argument. + stmcb_commit_soon() is called when it is advised to commit + the transaction as soon as possible in order to avoid conflicts + or improve performance in general. */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_commit_soon(void); /* Allocate an object of the given size, which must be a multiple @@ -268,6 +280,8 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) +#define STM_STACK_MARKER_NEW (-41) +#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t @@ -370,6 +384,43 @@ void stm_flush_timing(stm_thread_local_t *tl, int verbose); +/* The markers pushed in the shadowstack are an odd number followed by a + regular pointer. When needed, this library invokes this callback to + turn this pair into a human-readable explanation. 
*/ +extern void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); +extern void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + +/* Conventience macros to push the markers into the shadowstack */ +#define STM_PUSH_MARKER(tl, odd_num, p) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + STM_PUSH_ROOT(tl, _odd_num); \ + STM_PUSH_ROOT(tl, p); \ +} while (0) + +#define STM_POP_MARKER(tl) ({ \ + object_t *_popped = STM_POP_ROOT_RET(tl); \ + STM_POP_ROOT_RET(tl); \ + _popped; \ +}) + +#define STM_UPDATE_MARKER_NUM(tl, odd_num) do { \ + uintptr_t _odd_num = (odd_num); \ + assert(_odd_num & 1); \ + struct stm_shadowentry_s *_ss = (tl).shadowstack - 2; \ + while (!(((uintptr_t)(_ss->ss)) & 1)) { \ + _ss--; \ + assert(_ss >= (tl).shadowstack_base); \ + } \ + _ss->ss = (object_t *)_odd_num; \ +} while (0) + +char *_stm_expand_marker(void); + + /* ==================== END ==================== */ #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,6 +12,8 @@ #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... +#define STM_STACK_MARKER_NEW ... +#define STM_STACK_MARKER_OLD ... struct stm_shadowentry_s { object_t *ss; @@ -26,6 +28,10 @@ int associated_segment_num; uint32_t events[]; float timing[]; + int longest_marker_state; + double longest_marker_time; + char longest_marker_self[]; + char longest_marker_other[]; ...; } stm_thread_local_t; @@ -121,6 +127,17 @@ #define STM_TIME_SYNC_PAUSE ... 
void stm_flush_timing(stm_thread_local_t *, int); + +void (*stmcb_expand_marker)(char *segment_base, uintptr_t odd_number, + object_t *following_object, + char *outputbuf, size_t outputbufsize); +void (*stmcb_debug_print)(const char *cause, double time, + const char *marker); + +void stm_push_marker(stm_thread_local_t *, uintptr_t, object_t *); +void stm_update_marker_num(stm_thread_local_t *, uintptr_t); +void stm_pop_marker(stm_thread_local_t *); +char *_stm_expand_marker(void); """) @@ -275,6 +292,24 @@ } } +void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) +{ + STM_PUSH_MARKER(*tl, onum, ob); +} + +void stm_update_marker_num(stm_thread_local_t *tl, uintptr_t onum) +{ + STM_UPDATE_MARKER_NUM(*tl, onum); +} + +void stm_pop_marker(stm_thread_local_t *tl) +{ + STM_POP_MARKER(*tl); +} + +void stmcb_commit_soon() +{ +} ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_LARGEMALLOC_TEST', '1'), @@ -446,6 +481,8 @@ self.current_thread = 0 def teardown_method(self, meth): + lib.stmcb_expand_marker = ffi.NULL + lib.stmcb_debug_print = ffi.NULL tl = self.tls[self.current_thread] if lib._stm_in_transaction(tl) and lib.stm_is_inevitable(): self.commit_transaction() # must succeed! 
@@ -517,7 +554,8 @@ def pop_root(self): tl = self.tls[self.current_thread] curlength = tl.shadowstack - tl.shadowstack_base - if curlength == 0: + assert curlength >= 1 + if curlength == 1: raise EmptyStack assert 0 < curlength <= SHADOWSTACK_LENGTH tl.shadowstack -= 1 diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -228,3 +228,22 @@ self.start_transaction() assert stm_get_char(self.get_thread_local_obj()) == 'L' + + def test_marker_1(self): + self.start_transaction() + p1 = stm_allocate(600) + stm_set_char(p1, 'o') + self.push_root(p1) + self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW)) + p2 = stm_allocate(600) + stm_set_char(p2, 't') + self.push_root(p2) + stm_major_collect() + assert lib._stm_total_allocated() == 2 * 616 + # + p2 = self.pop_root() + m = self.pop_root() + assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + p1 = self.pop_root() + assert stm_get_char(p1) == 'o' + assert stm_get_char(p2) == 't' diff --git a/c7/test/test_marker.py b/c7/test/test_marker.py new file mode 100644 --- /dev/null +++ b/c7/test/test_marker.py @@ -0,0 +1,340 @@ +from support import * +import py, time + +class TestMarker(BaseTest): + + def test_marker_odd_simple(self): + self.start_transaction() + self.push_root(ffi.cast("object_t *", 29)) + stm_minor_collect() + stm_major_collect() + # assert did not crash + x = self.pop_root() + assert int(ffi.cast("uintptr_t", x)) == 29 + + def test_abort_marker_no_shadowstack(self): + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_OUTSIDE_TRANSACTION + assert tl.longest_marker_time == 0.0 + # + self.start_transaction() + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == 
'\x00' + assert tl.longest_marker_other[0] == '\x00' + + def test_abort_marker_shadowstack(self): + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert tl.longest_marker_self[0] == '\x00' + assert tl.longest_marker_other[0] == '\x00' + + def test_abort_marker_no_shadowstack_cb(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + seen.append(1) + lib.stmcb_expand_marker = expand_marker + seen = [] + # + self.start_transaction() + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_self[0] == '\x00' + assert not seen + + def test_abort_marker_shadowstack_cb(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + start = time.time() + while abs(time.time() - start) <= 0.1: + pass + self.abort_transaction() + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_OTHER + assert 0.099 <= tl.longest_marker_time <= 0.9 + assert ffi.string(tl.longest_marker_self) == '29 %r' % (p,) + assert ffi.string(tl.longest_marker_other) == '' + + def test_macros(self): + self.start_transaction() + p = stm_allocate(16) + tl = self.get_stm_thread_local() + lib.stm_push_marker(tl, 29, p) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == 
ffi.cast("object_t *", 29) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + self.push_root(p) + lib.stm_update_marker_num(tl, 27) + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == p + p1 = self.pop_root() + assert p1 == ffi.cast("object_t *", 27) + py.test.raises(EmptyStack, self.pop_root) + # + lib.stm_push_marker(tl, 29, p) + lib.stm_pop_marker(tl) + py.test.raises(EmptyStack, self.pop_root) + + def test_stm_expand_marker(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d %r\x00' % (number, ptr) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.push_root(stm_allocate(32)) + self.push_root(stm_allocate(16)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '29 %r' % (p,) + + def test_stmcb_debug_print(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '<<<%d>>>\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + @ffi.callback("void(char *, double, char *)") + def debug_print(cause, time, marker): + if 0.0 < time < 1.0: + time = "time_ok" + seen.append((ffi.string(cause), time, ffi.string(marker))) + seen = [] + lib.stmcb_expand_marker = expand_marker + lib.stmcb_debug_print = debug_print + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(p) + self.abort_transaction() + # + assert seen == [("run aborted other", "time_ok", "<<<29>>>")] + + def 
test_multiple_markers(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + seen.append(number) + s = '%d %r\x00' % (number, ptr == ffi.NULL) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + seen = [] + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + self.push_root(ffi.cast("object_t *", 27)) + self.push_root(p) + self.push_root(ffi.cast("object_t *", 29)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + raw = lib._stm_expand_marker() + assert ffi.string(raw) == '29 True' + assert seen == [29] + + def test_double_abort_markers_cb_write_write(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + p = stm_allocate_old(16) + # + self.start_transaction() + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_set_char(p, 'A') + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() + # + self.switch(1) + self.start_transaction() + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + py.test.raises(Conflict, stm_set_char, p, 'B') + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_WRITE_WRITE + assert ffi.string(tl.longest_marker_self) == '21' + assert ffi.string(tl.longest_marker_other) == '19' + + def test_double_abort_markers_cb_inevitable(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + c = (base + int(ffi.cast("uintptr_t", ptr)))[8] + s = '%d %r\x00' % (number, c) + assert len(s) <= outbufsize + 
outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker + # + self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'A') + self.push_root(ffi.cast("object_t *", 19)) + self.push_root(ffi.cast("object_t *", p)) + self.become_inevitable() + self.pop_root() + self.pop_root() + self.push_root(ffi.cast("object_t *", 17)) + self.push_root(ffi.cast("object_t *", ffi.NULL)) + stm_minor_collect() + # + self.switch(1) + self.start_transaction() + p = stm_allocate(16) + stm_set_char(p, 'B') + self.push_root(ffi.cast("object_t *", 21)) + self.push_root(ffi.cast("object_t *", p)) + py.test.raises(Conflict, self.become_inevitable) + # + tl = self.get_stm_thread_local() + assert tl.longest_marker_state == lib.STM_TIME_RUN_ABORTED_INEVITABLE + assert ffi.string(tl.longest_marker_self) == "21 'B'" + assert ffi.string(tl.longest_marker_other) == "19 'A'" + + def test_read_write_contention(self): + @ffi.callback("void(char *, uintptr_t, object_t *, char *, size_t)") + def expand_marker(base, number, ptr, outbuf, outbufsize): + s = '%d\x00' % (number,) + assert len(s) <= outbufsize + outbuf[0:len(s)] = s + lib.stmcb_expand_marker = expand_marker From noreply at buildbot.pypy.org Tue May 13 17:53:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 May 2014 17:53:16 +0200 (CEST) Subject: [pypy-commit] stmgc default: This sentence no longer makes sense. Message-ID: <20140513155316.853081C0320@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1209:d164a5bcad5e Date: 2014-05-13 17:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/d164a5bcad5e/ Log: This sentence no longer makes sense. diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -78,9 +78,7 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. 
Note that - every object takes three list items: the object, and two words for - the location marker. */ + that need to be copied to other segments upon commit. */ struct list_s *modified_old_objects; /* For each entry in 'modified_old_objects', we have two entries From noreply at buildbot.pypy.org Tue May 13 18:58:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 13 May 2014 18:58:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: disable transaction breaks in the JIT when there are no threads (to be done for non-jit too) Message-ID: <20140513165856.DC92F1D29B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71489:ecbf8bb70f4c Date: 2014-05-13 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/ecbf8bb70f4c/ Log: disable transaction breaks in the JIT when there are no threads (to be done for non-jit too) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -56,9 +56,9 @@ pypyjitdriver.jit_merge_point(ec=ec, frame=self, next_instr=next_instr, pycode=pycode, is_being_profiled=self.is_being_profiled) - # nothing inbetween! 
- if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() + if self.space.threadlocals.threads_running: # quasi-immutable field + if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) @@ -88,8 +88,9 @@ self.last_instr = intmask(jumpto) ec.bytecode_trace(self, decr_by) jumpto = r_uint(self.last_instr) - if rstm.jit_stm_should_break_transaction(True): - rstm.jit_stm_transaction_break_point() + if self.space.threadlocals.threads_running: # quasi-immutable field + if rstm.jit_stm_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode(), diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -43,6 +43,8 @@ class STMThreadLocals(BaseThreadLocals): + threads_running = False + _immutable_fields_ = ['threads_running?'] def initialize(self, space): """NOT_RPYTHON: set up a mechanism to send to the C code the value @@ -53,7 +55,6 @@ # assert space.actionflag.setcheckinterval_callback is None space.actionflag.setcheckinterval_callback = setcheckinterval_callback - self.threads_running = False self.seen_main_ec = False def getvalue(self): @@ -73,7 +74,9 @@ self.setvalue(None) def setup_threads(self, space): - self.threads_running = True + if not self.threads_running: + # invalidate quasi-immutable if we have threads: + self.threads_running = True self.configure_transaction_length(space) invoke_around_extcall(rstm.before_external_call, rstm.after_external_call, From noreply at buildbot.pypy.org Tue May 13 20:29:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 13 May 2014 20:29:37 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix jit translation, now that elidable is stricter Message-ID: 
<20140513182937.B72D21C0320@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71490:5ce8d091277f Date: 2014-05-12 18:17 -0700 http://bitbucket.org/pypy/pypy/changeset/5ce8d091277f/ Log: fix jit translation, now that elidable is stricter diff --git a/pypy/module/_rawffi/alt/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_rawffi/alt/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -7,7 +7,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import OperationError, oefmt from pypy.module._rawffi.alt.interp_ffitype import W_FFIType from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter @@ -99,12 +99,16 @@ rawmem = rffi.cast(rffi.VOIDP, addr) return W__StructInstance(self, allocate=False, autofree=True, rawmem=rawmem) + def get_type_and_offset_for_field(self, space, w_name): + name = space.str_w(w_name) + try: + return self._get_type_and_offset_for_field(space, name) + except KeyError: + raise OperationError(space.w_AttributeError, w_name) + @jit.elidable_promote('0') - def get_type_and_offset_for_field(self, space, name): - try: - w_field = self.name2w_field[name] - except KeyError: - raise oefmt(space.w_AttributeError, '%s', name) + def _get_type_and_offset_for_field(self, space, name): + w_field = self.name2w_field[name] return w_field.w_ffitype, w_field.offset @@ -178,17 +182,15 @@ addr = rffi.cast(rffi.ULONG, self.rawmem) return space.wrap(addr) - @unwrap_spec(name=str) - def getfield(self, space, name): + def getfield(self, space, w_name): w_ffitype, offset = self.structdescr.get_type_and_offset_for_field( - space, name) + space, w_name) field_getter = GetFieldConverter(space, self.rawmem, offset) return field_getter.do_and_wrap(w_ffitype) - 
@unwrap_spec(name=str) - def setfield(self, space, name, w_value): + def setfield(self, space, w_name, w_value): w_ffitype, offset = self.structdescr.get_type_and_offset_for_field( - space, name) + space, w_name) field_setter = SetFieldConverter(space, self.rawmem, offset) field_setter.unwrap_and_do(w_ffitype, w_value) From noreply at buildbot.pypy.org Tue May 13 20:29:39 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 13 May 2014 20:29:39 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix handling of EnvironmentError filenames Message-ID: <20140513182939.04F631C0320@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71491:cdd83d6f8d21 Date: 2014-05-13 10:56 -0700 http://bitbucket.org/pypy/pypy/changeset/cdd83d6f8d21/ Log: fix handling of EnvironmentError filenames diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -1,3 +1,4 @@ +# encoding: utf-8 from rpython.tool.udir import udir import os @@ -55,6 +56,14 @@ raises(IOError, _io.FileIO, fd, "rb") os.close(fd) + def test_open_non_existent_unicode(self): + import _io + import os + path = os.path.join(self.tmpdir, '_pypy-日本') + exc = raises(IOError, _io.FileIO, path) + expected = "[Errno 2] No such file or directory: %r" % path + assert str(exc.value) == expected + def test_readline(self): import _io f = _io.FileIO(self.tmpfile, 'rb') diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -389,14 +389,14 @@ def descr_str(self, space): if (not space.is_w(self.w_errno, space.w_None) and not space.is_w(self.w_strerror, space.w_None)): - errno = space.str_w(space.str(self.w_errno)) - strerror = space.str_w(space.str(self.w_strerror)) + errno = space.unicode_w(space.str(self.w_errno)) + strerror = 
space.unicode_w(space.str(self.w_strerror)) if not space.is_w(self.w_filename, space.w_None): - return space.wrap("[Errno %s] %s: %s" % ( + return space.wrap(u"[Errno %s] %s: %s" % ( errno, strerror, - space.str_w(space.repr(self.w_filename)))) - return space.wrap("[Errno %s] %s" % ( + space.unicode_w(space.repr(self.w_filename)))) + return space.wrap(u"[Errno %s] %s" % ( errno, strerror, )) @@ -441,13 +441,15 @@ def descr_str(self, space): if (not space.is_w(self.w_winerror, space.w_None) and not space.is_w(self.w_strerror, space.w_None)): + winerror = space.int_w(self.w_winerror) + strerror = space.unicode_w(self.w_strerror) if not space.is_w(self.w_filename, space.w_None): - return space.wrap("[Error %d] %s: %s" % ( - space.int_w(self.w_winerror), - space.str_w(self.w_strerror), - space.str_w(self.w_filename))) - return space.wrap("[Error %d] %s" % (space.int_w(self.w_winerror), - space.str_w(self.w_strerror))) + return space.wrap(u"[Error %d] %s: %s" % ( + winerror, + strerror, + space.unicode_w(self.w_filename))) + return space.wrap(u"[Error %d] %s" % (winerror, + strerror)) return W_BaseException.descr_str(self, space) if hasattr(rwin32, 'build_winerror_to_errno'): From noreply at buildbot.pypy.org Tue May 13 20:29:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 13 May 2014 20:29:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix cpyext's version Message-ID: <20140513182940.35FD91C0320@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71492:1c0d410559fa Date: 2014-05-13 11:09 -0700 http://bitbucket.org/pypy/pypy/changeset/1c0d410559fa/ Log: fix cpyext's version diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,7 +21,7 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 2 -#define PY_MICRO_VERSION 3 +#define PY_MICRO_VERSION 5 
#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 From noreply at buildbot.pypy.org Tue May 13 20:29:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 13 May 2014 20:29:41 +0200 (CEST) Subject: [pypy-commit] pypy py3k: update the sre support code to CPython 3.2.5's Message-ID: <20140513182941.8586F1C0320@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71493:947c69e3867c Date: 2014-05-13 11:28 -0700 http://bitbucket.org/pypy/pypy/changeset/947c69e3867c/ Log: update the sre support code to CPython 3.2.5's diff --git a/pypy/module/cpyext/test/_sre.c b/pypy/module/cpyext/test/_sre.c --- a/pypy/module/cpyext/test/_sre.c +++ b/pypy/module/cpyext/test/_sre.c @@ -453,7 +453,7 @@ } else { /* (32 bits per code word) */ - if (ch < 256 && (set[ch >> 5] & (1 << (ch & 31)))) + if (ch < 256 && (set[ch >> 5] & (1u << (ch & 31)))) return ok; set += 8; } @@ -492,7 +492,7 @@ block = -1; set += 64; if (block >=0 && - (set[block*8 + ((ch & 255)>>5)] & (1 << (ch & 31)))) + (set[block*8 + ((ch & 255)>>5)] & (1u << (ch & 31)))) return ok; set += count*8; } @@ -518,7 +518,7 @@ Py_ssize_t i; /* adjust end */ - if (maxcount < end - ptr && maxcount != 65535) + if (maxcount < end - ptr && maxcount != SRE_MAXREPEAT) end = ptr + maxcount; switch (pattern[0]) { @@ -1133,7 +1133,7 @@ } else { /* general case */ LASTMARK_SAVE(); - while ((Py_ssize_t)ctx->pattern[2] == 65535 + while ((Py_ssize_t)ctx->pattern[2] == SRE_MAXREPEAT || ctx->count <= (Py_ssize_t)ctx->pattern[2]) { state->ptr = ctx->ptr; DO_JUMP(JUMP_MIN_REPEAT_ONE,jump_min_repeat_one, @@ -1219,7 +1219,7 @@ } if ((ctx->count < ctx->u.rep->pattern[2] || - ctx->u.rep->pattern[2] == 65535) && + ctx->u.rep->pattern[2] == SRE_MAXREPEAT) && state->ptr != ctx->u.rep->last_ptr) { /* we may have enough matches, but if we can match another item, do so */ @@ -1296,13 +1296,18 @@ LASTMARK_RESTORE(); - if (ctx->count >= ctx->u.rep->pattern[2] - && ctx->u.rep->pattern[2] != 65535) + if 
((ctx->count >= ctx->u.rep->pattern[2] + && ctx->u.rep->pattern[2] != SRE_MAXREPEAT) || + state->ptr == ctx->u.rep->last_ptr) RETURN_FAILURE; ctx->u.rep->count = ctx->count; + /* zero-width match protection */ + DATA_PUSH(&ctx->u.rep->last_ptr); + ctx->u.rep->last_ptr = state->ptr; DO_JUMP(JUMP_MIN_UNTIL_3,jump_min_until_3, ctx->u.rep->pattern+3); + DATA_POP(&ctx->u.rep->last_ptr); if (ret) { RETURN_ON_ERROR(ret); RETURN_SUCCESS; @@ -1630,7 +1635,7 @@ static PyObject * sre_codesize(PyObject* self, PyObject *unused) { - return Py_BuildValue("l", sizeof(SRE_CODE)); + return PyLong_FromSize_t(sizeof(SRE_CODE)); } static PyObject * @@ -2468,7 +2473,7 @@ return NULL; if (subn) - return Py_BuildValue("Ni", item, n); + return Py_BuildValue("Nn", item, n); return item; @@ -2560,35 +2565,35 @@ } PyDoc_STRVAR(pattern_match_doc, -"match(string[, pos[, endpos]]) --> match object or None.\n\ +"match(string[, pos[, endpos]]) -> match object or None.\n\n\ Matches zero or more characters at the beginning of the string"); PyDoc_STRVAR(pattern_search_doc, -"search(string[, pos[, endpos]]) --> match object or None.\n\ +"search(string[, pos[, endpos]]) -> match object or None.\n\n\ Scan through string looking for a match, and return a corresponding\n\ - MatchObject instance. Return None if no position in the string matches."); + match object instance. 
Return None if no position in the string matches."); PyDoc_STRVAR(pattern_split_doc, -"split(string[, maxsplit = 0]) --> list.\n\ +"split(string[, maxsplit = 0]) -> list.\n\n\ Split string by the occurrences of pattern."); PyDoc_STRVAR(pattern_findall_doc, -"findall(string[, pos[, endpos]]) --> list.\n\ +"findall(string[, pos[, endpos]]) -> list.\n\n\ Return a list of all non-overlapping matches of pattern in string."); PyDoc_STRVAR(pattern_finditer_doc, -"finditer(string[, pos[, endpos]]) --> iterator.\n\ +"finditer(string[, pos[, endpos]]) -> iterator.\n\n\ Return an iterator over all non-overlapping matches for the \n\ RE pattern in string. For each match, the iterator returns a\n\ match object."); PyDoc_STRVAR(pattern_sub_doc, -"sub(repl, string[, count = 0]) --> newstring\n\ +"sub(repl, string[, count = 0]) -> newstring.\n\n\ Return the string obtained by replacing the leftmost non-overlapping\n\ occurrences of pattern in string by the replacement repl."); PyDoc_STRVAR(pattern_subn_doc, -"subn(repl, string[, count = 0]) --> (newstring, number of subs)\n\ +"subn(repl, string[, count = 0]) -> (newstring, number of subs)\n\n\ Return the tuple (new_string, number_of_subs_made) found by replacing\n\ the leftmost non-overlapping occurrences of pattern with the\n\ replacement repl."); @@ -2696,6 +2701,13 @@ for (i = 0; i < n; i++) { PyObject *o = PyList_GET_ITEM(code, i); unsigned long value = PyLong_AsUnsignedLong(o); + if (value == (unsigned long)-1 && PyErr_Occurred()) { + if (PyErr_ExceptionMatches(PyExc_OverflowError)) { + PyErr_SetString(PyExc_OverflowError, + "regular expression code size limit exceeded"); + } + break; + } self->code[i] = (SRE_CODE) value; if ((unsigned long) self->code[i] != value) { PyErr_SetString(PyExc_OverflowError, @@ -3066,10 +3078,8 @@ GET_ARG; max = arg; if (min > max) FAIL; -#ifdef Py_UNICODE_WIDE - if (max > 65535) + if (max > SRE_MAXREPEAT) FAIL; -#endif if (!_validate_inner(code, code+skip-4, groups)) FAIL; code += skip-4; @@ 
-3087,10 +3097,8 @@ GET_ARG; max = arg; if (min > max) FAIL; -#ifdef Py_UNICODE_WIDE - if (max > 65535) + if (max > SRE_MAXREPEAT) FAIL; -#endif if (!_validate_inner(code, code+skip-3, groups)) FAIL; code += skip-3; @@ -3421,7 +3429,7 @@ } /* mark is -1 if group is undefined */ - return Py_BuildValue("i", self->mark[index*2]); + return PyLong_FromSsize_t(self->mark[index*2]); } static PyObject* @@ -3444,7 +3452,7 @@ } /* mark is -1 if group is undefined */ - return Py_BuildValue("i", self->mark[index*2+1]); + return PyLong_FromSsize_t(self->mark[index*2+1]); } LOCAL(PyObject*) @@ -3577,14 +3585,54 @@ #endif } +PyDoc_STRVAR(match_doc, +"The result of re.match() and re.search().\n\ +Match objects always have a boolean value of True."); + +PyDoc_STRVAR(match_group_doc, +"group([group1, ...]) -> str or tuple.\n\n\ + Return subgroup(s) of the match by indices or names.\n\ + For 0 returns the entire match."); + +PyDoc_STRVAR(match_start_doc, +"start([group=0]) -> int.\n\n\ + Return index of the start of the substring matched by group."); + +PyDoc_STRVAR(match_end_doc, +"end([group=0]) -> int.\n\n\ + Return index of the end of the substring matched by group."); + +PyDoc_STRVAR(match_span_doc, +"span([group]) -> tuple.\n\n\ + For MatchObject m, return the 2-tuple (m.start(group), m.end(group))."); + +PyDoc_STRVAR(match_groups_doc, +"groups([default=None]) -> tuple.\n\n\ + Return a tuple containing all the subgroups of the match, from 1.\n\ + The default argument is used for groups\n\ + that did not participate in the match"); + +PyDoc_STRVAR(match_groupdict_doc, +"groupdict([default=None]) -> dict.\n\n\ + Return a dictionary containing all the named subgroups of the match,\n\ + keyed by the subgroup name. 
The default argument is used for groups\n\ + that did not participate in the match"); + +PyDoc_STRVAR(match_expand_doc, +"expand(template) -> str.\n\n\ + Return the string obtained by doing backslash substitution\n\ + on the string template, as done by the sub() method."); + static PyMethodDef match_methods[] = { - {"group", (PyCFunction) match_group, METH_VARARGS}, - {"start", (PyCFunction) match_start, METH_VARARGS}, - {"end", (PyCFunction) match_end, METH_VARARGS}, - {"span", (PyCFunction) match_span, METH_VARARGS}, - {"groups", (PyCFunction) match_groups, METH_VARARGS|METH_KEYWORDS}, - {"groupdict", (PyCFunction) match_groupdict, METH_VARARGS|METH_KEYWORDS}, - {"expand", (PyCFunction) match_expand, METH_O}, + {"group", (PyCFunction) match_group, METH_VARARGS, match_group_doc}, + {"start", (PyCFunction) match_start, METH_VARARGS, match_start_doc}, + {"end", (PyCFunction) match_end, METH_VARARGS, match_end_doc}, + {"span", (PyCFunction) match_span, METH_VARARGS, match_span_doc}, + {"groups", (PyCFunction) match_groups, METH_VARARGS|METH_KEYWORDS, + match_groups_doc}, + {"groupdict", (PyCFunction) match_groupdict, METH_VARARGS|METH_KEYWORDS, + match_groupdict_doc}, + {"expand", (PyCFunction) match_expand, METH_O, match_expand_doc}, {"__copy__", (PyCFunction) match_copy, METH_NOARGS}, {"__deepcopy__", (PyCFunction) match_deepcopy, METH_O}, {NULL, NULL} @@ -3594,7 +3642,7 @@ match_lastindex_get(MatchObject *self) { if (self->lastindex >= 0) - return Py_BuildValue("i", self->lastindex); + return PyLong_FromSsize_t(self->lastindex); Py_INCREF(Py_None); return Py_None; } @@ -3663,7 +3711,7 @@ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ + match_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ @@ -3937,6 +3985,12 @@ Py_DECREF(x); } + x = PyLong_FromUnsignedLong(SRE_MAXREPEAT); + if (x) { + PyDict_SetItemString(d, "MAXREPEAT", x); + Py_DECREF(x); + } + x = PyUnicode_FromString(copyright); 
if (x) { PyDict_SetItemString(d, "copyright", x); diff --git a/pypy/module/cpyext/test/sre.h b/pypy/module/cpyext/test/sre.h --- a/pypy/module/cpyext/test/sre.h +++ b/pypy/module/cpyext/test/sre.h @@ -14,11 +14,12 @@ #include "sre_constants.h" /* size of a code word (must be unsigned short or larger, and - large enough to hold a Py_UNICODE character) */ -#ifdef Py_UNICODE_WIDE + large enough to hold a UCS4 character) */ #define SRE_CODE Py_UCS4 +#if SIZEOF_SIZE_T > 4 +# define SRE_MAXREPEAT (~(SRE_CODE)0) #else -#define SRE_CODE unsigned short +# define SRE_MAXREPEAT ((SRE_CODE)PY_SSIZE_T_MAX + 1u) #endif typedef struct { From noreply at buildbot.pypy.org Tue May 13 21:03:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 May 2014 21:03:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Some extra instructions Message-ID: <20140513190309.A03C71D2E94@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71494:a8c3e1620409 Date: 2014-05-13 21:02 +0200 http://bitbucket.org/pypy/pypy/changeset/a8c3e1620409/ Log: Some extra instructions diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,7 +522,7 @@ # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations - 'bextr', + 'andn', 'bextr', 'blsi', 'blsmask', 'blsr', 'tzcnt', 'lzcnt', ]) # a partial list is hopefully good enough for now; it's all to support From noreply at buildbot.pypy.org Tue May 13 22:07:55 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 May 2014 22:07:55 +0200 (CEST) Subject: [pypy-commit] pypy default: move BoolRepr to rpython.rtyper.rbool Message-ID: <20140513200755.057491D2D24@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71495:3365b7b9ccf6 Date: 2014-05-13 16:51 +0100 http://bitbucket.org/pypy/pypy/changeset/3365b7b9ccf6/ Log: move BoolRepr to rpython.rtyper.rbool 
diff --git a/rpython/rtyper/rbool.py b/rpython/rtyper/rbool.py --- a/rpython/rtyper/rbool.py +++ b/rpython/rtyper/rbool.py @@ -1,21 +1,16 @@ from rpython.annotator import model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Unsigned, Bool, Float -from rpython.rtyper.rmodel import IntegerRepr, BoolRepr, log +from rpython.rtyper.rmodel import IntegerRepr, log from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomeBool): - def rtyper_makerepr(self, rtyper): - return bool_repr - - def rtyper_makekey(self): - return self.__class__, - -bool_repr = BoolRepr() - - -class __extend__(BoolRepr): +class BoolRepr(IntegerRepr): + lowleveltype = Bool + # NB. no 'opprefix' here. Use 'as_int' systematically. + def __init__(self): + from rpython.rtyper.rint import signed_repr + self.as_int = signed_repr def convert_const(self, value): if not isinstance(value, bool): @@ -23,7 +18,7 @@ return value def rtype_bool(_, hop): - vlist = hop.inputargs(Bool) + vlist = hop.inputargs(bool_repr) return vlist[0] def rtype_int(_, hop): @@ -36,6 +31,16 @@ hop.exception_cannot_occur() return vlist[0] +bool_repr = BoolRepr() + + +class __extend__(annmodel.SomeBool): + def rtyper_makerepr(self, rtyper): + return bool_repr + + def rtyper_makekey(self): + return self.__class__, + # # _________________________ Conversions _________________________ diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -7,7 +7,8 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, SignedLongLong, UnsignedLongLong, Bool, Float) -from rpython.rtyper.rmodel import FloatRepr, IntegerRepr, BoolRepr, log +from rpython.rtyper.rmodel import FloatRepr, IntegerRepr, log +from rpython.rtyper.rbool import BoolRepr from rpython.rtyper.rstr import AbstractStringRepr from rpython.tool.pairtype import pairtype diff --git 
a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -342,13 +342,6 @@ opprefix = property(_get_opprefix) -class BoolRepr(IntegerRepr): - lowleveltype = Bool - # NB. no 'opprefix' here. Use 'as_int' systematically. - def __init__(self): - from rpython.rtyper.rint import signed_repr - self.as_int = signed_repr - class VoidRepr(Repr): lowleveltype = Void def get_ll_eq_function(self): return None From noreply at buildbot.pypy.org Tue May 13 22:07:56 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 May 2014 22:07:56 +0200 (CEST) Subject: [pypy-commit] pypy default: move FloatRepr and IntegerRepr to rfloat and rint respectively Message-ID: <20140513200756.4F2861D2D24@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71496:ea4a7d70ec57 Date: 2014-05-13 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/ea4a7d70ec57/ Log: move FloatRepr and IntegerRepr to rfloat and rint respectively diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1,7 +1,7 @@ import py from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import SomePtr -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr from rpython.rtyper.lltypesystem.llmemory import itemoffsetof diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,11 +9,11 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory -from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype 
import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) -from rpython.rtyper.rmodel import inputconst, Repr, IntegerRepr +from rpython.rtyper.rmodel import inputconst, Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import (AbstractStringRepr, AbstractCharRepr, AbstractUniCharRepr, AbstractStringIteratorRepr, AbstractLLHelpers, AbstractUnicodeRepr) diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -11,7 +11,8 @@ from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.error import TyperError from rpython.rtyper.module import ll_os_stat diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.llmemory import (NULL, Address, cast_adr_to_int, fakeaddress, sizeof) -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rptr import PtrRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rbool.py b/rpython/rtyper/rbool.py --- a/rpython/rtyper/rbool.py +++ b/rpython/rtyper/rbool.py @@ -1,7 +1,9 @@ from rpython.annotator import model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Unsigned, Bool, Float -from rpython.rtyper.rmodel import IntegerRepr, log +from rpython.rtyper.rmodel import log +from rpython.rtyper.rint import 
IntegerRepr +from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype @@ -44,6 +46,20 @@ # # _________________________ Conversions _________________________ +class __extend__(pairtype(BoolRepr, FloatRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Bool and r_to.lowleveltype == Float: + log.debug('explicit cast_bool_to_float') + return llops.genop('cast_bool_to_float', [v], resulttype=Float) + return NotImplemented + +class __extend__(pairtype(FloatRepr, BoolRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Float and r_to.lowleveltype == Bool: + log.debug('explicit cast_float_to_bool') + return llops.genop('float_is_true', [v], resulttype=Bool) + return NotImplemented + class __extend__(pairtype(BoolRepr, IntegerRepr)): def convert_from_to((r_from, r_to), v, llops): if r_from.lowleveltype == Bool and r_to.lowleveltype == Unsigned: diff --git a/rpython/rtyper/rbytearray.py b/rpython/rtyper/rbytearray.py --- a/rpython/rtyper/rbytearray.py +++ b/rpython/rtyper/rbytearray.py @@ -1,6 +1,6 @@ from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import IntegerRepr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import AbstractStringRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -1,17 +1,65 @@ from rpython.annotator import model as annmodel from rpython.rlib.objectmodel import _hash_float from rpython.rlib.rarithmetic import base_int -from rpython.rlib.rfloat import formatd from rpython.rlib import jit from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, - SignedLongLong, UnsignedLongLong, Bool, Float) -from rpython.rtyper.rmodel import FloatRepr, 
IntegerRepr, log -from rpython.rtyper.rbool import BoolRepr -from rpython.rtyper.rstr import AbstractStringRepr +from rpython.rtyper.lltypesystem.lltype import (Signed, Bool, Float) +from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype +class FloatRepr(Repr): + lowleveltype = Float + + def convert_const(self, value): + if not isinstance(value, (int, base_int, float)): # can be bool too + raise TyperError("not a float: %r" % (value,)) + return float(value) + + def get_ll_eq_function(self): + return None + get_ll_gt_function = get_ll_eq_function + get_ll_lt_function = get_ll_eq_function + get_ll_ge_function = get_ll_eq_function + get_ll_le_function = get_ll_eq_function + + def get_ll_hash_function(self): + return _hash_float + + def rtype_bool(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_is_true', vlist, resulttype=Bool) + + def rtype_neg(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_neg', vlist, resulttype=Float) + + def rtype_pos(_, hop): + vlist = hop.inputargs(Float) + return vlist[0] + + def rtype_abs(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_abs', vlist, resulttype=Float) + + def rtype_int(_, hop): + vlist = hop.inputargs(Float) + # int(x) never raises in RPython, you need to use + # rarithmetic.ovfcheck_float_to_int() if you want this + hop.exception_cannot_occur() + return hop.genop('cast_float_to_int', vlist, resulttype=Signed) + + def rtype_float(_, hop): + vlist = hop.inputargs(Float) + hop.exception_cannot_occur() + return vlist[0] + + @jit.elidable + def ll_str(self, f): + from rpython.rlib.rfloat import formatd + return llstr(formatd(f, 'f', 6)) + +float_repr = FloatRepr() class __extend__(annmodel.SomeFloat): def rtyper_makerepr(self, rtyper): @@ -21,9 +69,6 @@ return self.__class__, -float_repr = FloatRepr() - - class __extend__(pairtype(FloatRepr, FloatRepr)): #Arithmetic @@ -76,11 +121,6 @@ def rtype_ge(_, hop): return _rtype_compare_template(hop, 'ge') 
-class __extend__(pairtype(AbstractStringRepr, FloatRepr)): - def rtype_mod(_, hop): - from rpython.rtyper.lltypesystem.rstr import do_stringformat - return do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) - #Helpers FloatRepr,FloatRepr def _rtype_template(hop, func): @@ -92,104 +132,6 @@ return hop.genop('float_'+func, vlist, resulttype=Bool) -class __extend__(FloatRepr): - - def convert_const(self, value): - if not isinstance(value, (int, base_int, float)): # can be bool too - raise TyperError("not a float: %r" % (value,)) - return float(value) - - def get_ll_eq_function(self): - return None - get_ll_gt_function = get_ll_eq_function - get_ll_lt_function = get_ll_eq_function - get_ll_ge_function = get_ll_eq_function - get_ll_le_function = get_ll_eq_function - - def get_ll_hash_function(self): - return _hash_float - - def rtype_bool(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_is_true', vlist, resulttype=Bool) - - def rtype_neg(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_neg', vlist, resulttype=Float) - - def rtype_pos(_, hop): - vlist = hop.inputargs(Float) - return vlist[0] - - def rtype_abs(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_abs', vlist, resulttype=Float) - - def rtype_int(_, hop): - vlist = hop.inputargs(Float) - # int(x) never raises in RPython, you need to use - # rarithmetic.ovfcheck_float_to_int() if you want this - hop.exception_cannot_occur() - return hop.genop('cast_float_to_int', vlist, resulttype=Signed) - - def rtype_float(_, hop): - vlist = hop.inputargs(Float) - hop.exception_cannot_occur() - return vlist[0] - - @jit.elidable - def ll_str(self, f): - return llstr(formatd(f, 'f', 6)) - -# -# _________________________ Conversions _________________________ - -class __extend__(pairtype(IntegerRepr, FloatRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Float: - log.debug('explicit cast_uint_to_float') - 
return llops.genop('cast_uint_to_float', [v], resulttype=Float) - if r_from.lowleveltype == Signed and r_to.lowleveltype == Float: - log.debug('explicit cast_int_to_float') - return llops.genop('cast_int_to_float', [v], resulttype=Float) - if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float: - log.debug('explicit cast_longlong_to_float') - return llops.genop('cast_longlong_to_float', [v], resulttype=Float) - if r_from.lowleveltype == UnsignedLongLong and r_to.lowleveltype == Float: - log.debug('explicit cast_ulonglong_to_float') - return llops.genop('cast_ulonglong_to_float', [v], resulttype=Float) - return NotImplemented - -class __extend__(pairtype(FloatRepr, IntegerRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Float and r_to.lowleveltype == Unsigned: - log.debug('explicit cast_float_to_uint') - return llops.genop('cast_float_to_uint', [v], resulttype=Unsigned) - if r_from.lowleveltype == Float and r_to.lowleveltype == Signed: - log.debug('explicit cast_float_to_int') - return llops.genop('cast_float_to_int', [v], resulttype=Signed) - if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong: - log.debug('explicit cast_float_to_longlong') - return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong) - if r_from.lowleveltype == Float and r_to.lowleveltype == UnsignedLongLong: - log.debug('explicit cast_float_to_ulonglong') - return llops.genop('cast_float_to_ulonglong', [v], resulttype=UnsignedLongLong) - return NotImplemented - -class __extend__(pairtype(BoolRepr, FloatRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Bool and r_to.lowleveltype == Float: - log.debug('explicit cast_bool_to_float') - return llops.genop('cast_bool_to_float', [v], resulttype=Float) - return NotImplemented - -class __extend__(pairtype(FloatRepr, BoolRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Float and r_to.lowleveltype 
== Bool: - log.debug('explicit cast_float_to_bool') - return llops.genop('float_is_true', [v], resulttype=Bool) - return NotImplemented - # ______________________________________________________________________ # Support for r_singlefloat and r_longfloat from rpython.rlib.rarithmetic diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -8,9 +8,166 @@ from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, Char, UniChar, UnsignedLongLong, SignedLongLong, build_number, Number, cast_primitive, typeOf, SignedLongLongLong) -from rpython.rtyper.rmodel import IntegerRepr, inputconst, log +from rpython.rtyper.rfloat import FloatRepr +from rpython.rtyper.rmodel import inputconst, log from rpython.tool.pairtype import pairtype +class IntegerRepr(FloatRepr): + def __init__(self, lowleveltype, opprefix): + self.lowleveltype = lowleveltype + self._opprefix = opprefix + self.as_int = self + + @property + def opprefix(self): + if self._opprefix is None: + raise TyperError("arithmetic not supported on %r, its size is too small" % + self.lowleveltype) + return self._opprefix + + def convert_const(self, value): + if isinstance(value, objectmodel.Symbolic): + return value + T = typeOf(value) + if isinstance(T, Number) or T is Bool: + return cast_primitive(self.lowleveltype, value) + raise TyperError("not an integer: %r" % (value,)) + + def get_ll_eq_function(self): + if getattr(self, '_opprefix', '?') is None: + return ll_eq_shortint + return None + + def get_ll_ge_function(self): + return None + get_ll_gt_function = get_ll_ge_function + get_ll_lt_function = get_ll_ge_function + get_ll_le_function = get_ll_ge_function + + def get_ll_hash_function(self): + if (sys.maxint == 2147483647 and + self.lowleveltype in (SignedLongLong, UnsignedLongLong)): + return ll_hash_long_long + return ll_hash_int + + get_ll_fasthash_function = get_ll_hash_function + + def get_ll_dummyval_obj(self, rtyper, 
s_value): + # if >= 0, then all negative values are special + if s_value.nonneg and self.lowleveltype is Signed: + return signed_repr # whose ll_dummy_value is -1 + else: + return None + + ll_dummy_value = -1 + + def rtype_chr(_, hop): + vlist = hop.inputargs(Signed) + if hop.has_implicit_exception(ValueError): + hop.exception_is_here() + hop.gendirectcall(ll_check_chr, vlist[0]) + else: + hop.exception_cannot_occur() + return hop.genop('cast_int_to_char', vlist, resulttype=Char) + + def rtype_unichr(_, hop): + vlist = hop.inputargs(Signed) + if hop.has_implicit_exception(ValueError): + hop.exception_is_here() + hop.gendirectcall(ll_check_unichr, vlist[0]) + else: + hop.exception_cannot_occur() + return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar) + + def rtype_bool(self, hop): + assert self is self.as_int # rtype_is_true() is overridden in BoolRepr + vlist = hop.inputargs(self) + return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool) + + #Unary arithmetic operations + + def rtype_abs(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + if hop.s_result.unsigned: + return vlist[0] + else: + return hop.genop(self.opprefix + 'abs', vlist, resulttype=self) + + def rtype_abs_ovf(self, hop): + self = self.as_int + if hop.s_result.unsigned: + raise TyperError("forbidden uint_abs_ovf") + else: + vlist = hop.inputargs(self) + hop.has_implicit_exception(OverflowError) # record we know about it + hop.exception_is_here() + return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self) + + def rtype_invert(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + return hop.genop(self.opprefix + 'invert', vlist, resulttype=self) + + def rtype_neg(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + if hop.s_result.unsigned: + # implement '-r_uint(x)' with unsigned subtraction '0 - x' + zero = self.lowleveltype._defl() + vlist.insert(0, hop.inputconst(self.lowleveltype, zero)) + return hop.genop(self.opprefix + 
'sub', vlist, resulttype=self) + else: + return hop.genop(self.opprefix + 'neg', vlist, resulttype=self) + + def rtype_neg_ovf(self, hop): + self = self.as_int + if hop.s_result.unsigned: + # this is supported (and turns into just 0-x) for rbigint.py + hop.exception_cannot_occur() + return self.rtype_neg(hop) + else: + vlist = hop.inputargs(self) + hop.has_implicit_exception(OverflowError) # record we know about it + hop.exception_is_here() + return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self) + + def rtype_pos(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + return vlist[0] + + def rtype_int(self, hop): + if self.lowleveltype in (Unsigned, UnsignedLongLong): + raise TyperError("use intmask() instead of int(r_uint(...))") + vlist = hop.inputargs(Signed) + hop.exception_cannot_occur() + return vlist[0] + + def rtype_float(_, hop): + vlist = hop.inputargs(Float) + hop.exception_cannot_occur() + return vlist[0] + + @jit.elidable + def ll_str(self, i): + from rpython.rtyper.lltypesystem.ll_str import ll_int2dec + return ll_int2dec(i) + + def rtype_hex(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2hex + self = self.as_int + varg = hop.inputarg(self, 0) + true = inputconst(Bool, True) + return hop.gendirectcall(ll_int2hex, varg, true) + + def rtype_oct(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2oct + self = self.as_int + varg = hop.inputarg(self, 0) + true = inputconst(Bool, True) + return hop.gendirectcall(ll_int2oct, varg, true) + _integer_reprs = {} def getintegerrepr(lltype, prefix=None): @@ -235,156 +392,11 @@ repr = hop.rtyper.getrepr(annmodel.unionof(s_int1, s_int2)).as_int vlist = hop.inputargs(repr, repr) hop.exception_is_here() - return hop.genop(repr.opprefix+func, vlist, resulttype=Bool) + return hop.genop(repr.opprefix + func, vlist, resulttype=Bool) # -class __extend__(IntegerRepr): - - def convert_const(self, value): - if isinstance(value, objectmodel.Symbolic): - return value - 
T = typeOf(value) - if isinstance(T, Number) or T is Bool: - return cast_primitive(self.lowleveltype, value) - raise TyperError("not an integer: %r" % (value,)) - - def get_ll_eq_function(self): - if getattr(self, '_opprefix', '?') is None: - return ll_eq_shortint - return None - - def get_ll_ge_function(self): - return None - get_ll_gt_function = get_ll_ge_function - get_ll_lt_function = get_ll_ge_function - get_ll_le_function = get_ll_ge_function - - def get_ll_hash_function(self): - if (sys.maxint == 2147483647 and - self.lowleveltype in (SignedLongLong, UnsignedLongLong)): - return ll_hash_long_long - return ll_hash_int - - get_ll_fasthash_function = get_ll_hash_function - - def get_ll_dummyval_obj(self, rtyper, s_value): - # if >= 0, then all negative values are special - if s_value.nonneg and self.lowleveltype is Signed: - return signed_repr # whose ll_dummy_value is -1 - else: - return None - - ll_dummy_value = -1 - - def rtype_chr(_, hop): - vlist = hop.inputargs(Signed) - if hop.has_implicit_exception(ValueError): - hop.exception_is_here() - hop.gendirectcall(ll_check_chr, vlist[0]) - else: - hop.exception_cannot_occur() - return hop.genop('cast_int_to_char', vlist, resulttype=Char) - - def rtype_unichr(_, hop): - vlist = hop.inputargs(Signed) - if hop.has_implicit_exception(ValueError): - hop.exception_is_here() - hop.gendirectcall(ll_check_unichr, vlist[0]) - else: - hop.exception_cannot_occur() - return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar) - - def rtype_bool(self, hop): - assert self is self.as_int # rtype_is_true() is overridden in BoolRepr - vlist = hop.inputargs(self) - return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool) - - #Unary arithmetic operations - - def rtype_abs(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - if hop.s_result.unsigned: - return vlist[0] - else: - return hop.genop(self.opprefix + 'abs', vlist, resulttype=self) - - def rtype_abs_ovf(self, hop): - self = self.as_int - if 
hop.s_result.unsigned: - raise TyperError("forbidden uint_abs_ovf") - else: - vlist = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self) - - def rtype_invert(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - return hop.genop(self.opprefix + 'invert', vlist, resulttype=self) - - def rtype_neg(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - if hop.s_result.unsigned: - # implement '-r_uint(x)' with unsigned subtraction '0 - x' - zero = self.lowleveltype._defl() - vlist.insert(0, hop.inputconst(self.lowleveltype, zero)) - return hop.genop(self.opprefix + 'sub', vlist, resulttype=self) - else: - return hop.genop(self.opprefix + 'neg', vlist, resulttype=self) - - def rtype_neg_ovf(self, hop): - self = self.as_int - if hop.s_result.unsigned: - # this is supported (and turns into just 0-x) for rbigint.py - hop.exception_cannot_occur() - return self.rtype_neg(hop) - else: - vlist = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self) - - def rtype_pos(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - return vlist[0] - - def rtype_int(self, hop): - if self.lowleveltype in (Unsigned, UnsignedLongLong): - raise TyperError("use intmask() instead of int(r_uint(...))") - vlist = hop.inputargs(Signed) - hop.exception_cannot_occur() - return vlist[0] - - def rtype_float(_, hop): - vlist = hop.inputargs(Float) - hop.exception_cannot_occur() - return vlist[0] - - @jit.elidable - def ll_str(self, i): - from rpython.rtyper.lltypesystem.ll_str import ll_int2dec - return ll_int2dec(i) - - def rtype_hex(self, hop): - from rpython.rtyper.lltypesystem.ll_str import ll_int2hex - self = self.as_int - varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return 
hop.gendirectcall(ll_int2hex, varg, true) - - def rtype_oct(self, hop): - from rpython.rtyper.lltypesystem.ll_str import ll_int2oct - self = self.as_int - varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return hop.gendirectcall(ll_int2oct, varg, true) - def ll_hash_int(n): return intmask(n) @@ -407,3 +419,38 @@ return else: raise ValueError + +# +# _________________________ Conversions _________________________ + +class __extend__(pairtype(IntegerRepr, FloatRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Float: + log.debug('explicit cast_uint_to_float') + return llops.genop('cast_uint_to_float', [v], resulttype=Float) + if r_from.lowleveltype == Signed and r_to.lowleveltype == Float: + log.debug('explicit cast_int_to_float') + return llops.genop('cast_int_to_float', [v], resulttype=Float) + if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float: + log.debug('explicit cast_longlong_to_float') + return llops.genop('cast_longlong_to_float', [v], resulttype=Float) + if r_from.lowleveltype == UnsignedLongLong and r_to.lowleveltype == Float: + log.debug('explicit cast_ulonglong_to_float') + return llops.genop('cast_ulonglong_to_float', [v], resulttype=Float) + return NotImplemented + +class __extend__(pairtype(FloatRepr, IntegerRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Float and r_to.lowleveltype == Unsigned: + log.debug('explicit cast_float_to_uint') + return llops.genop('cast_float_to_uint', [v], resulttype=Unsigned) + if r_from.lowleveltype == Float and r_to.lowleveltype == Signed: + log.debug('explicit cast_float_to_int') + return llops.genop('cast_float_to_int', [v], resulttype=Signed) + if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong: + log.debug('explicit cast_float_to_longlong') + return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong) + if r_from.lowleveltype == Float and 
r_to.lowleveltype == UnsignedLongLong: + log.debug('explicit cast_float_to_ulonglong') + return llops.genop('cast_float_to_ulonglong', [v], resulttype=UnsignedLongLong) + return NotImplemented diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -9,7 +9,8 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool from rpython.rtyper.lltypesystem.lltype import nullptr, Char, UniChar, Number -from rpython.rtyper.rmodel import Repr, IteratorRepr, IntegerRepr +from rpython.rtyper.rmodel import Repr, IteratorRepr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import AbstractStringRepr, AbstractCharRepr from rpython.tool.pairtype import pairtype, pair diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, Float, typeOf, +from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, LowLevelType, isCompatibleType) from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -322,25 +322,6 @@ return NotImplemented # ____________________________________________________________ -# Primitive Repr classes, in the same hierarchical order as -# the corresponding SomeObjects - -class FloatRepr(Repr): - lowleveltype = Float - -class IntegerRepr(FloatRepr): - def __init__(self, lowleveltype, opprefix): - self.lowleveltype = lowleveltype - self._opprefix = opprefix - self.as_int = self - - def _get_opprefix(self): - if self._opprefix is None: - raise TyperError("arithmetic not supported on %r, its size is too small" % - self.lowleveltype) - return self._opprefix - - opprefix = property(_get_opprefix) class 
VoidRepr(Repr): lowleveltype = Void diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -5,7 +5,8 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -2,7 +2,8 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr from rpython.rtyper.rlist import dum_nocheck, dum_checkidx -from rpython.rtyper.rmodel import Repr, IntegerRepr, IteratorRepr +from rpython.rtyper.rmodel import Repr, IteratorRepr +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -3,7 +3,9 @@ from rpython.rtyper import rint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar -from rpython.rtyper.rmodel import IntegerRepr, IteratorRepr, inputconst, Repr +from rpython.rtyper.rmodel import IteratorRepr, inputconst, Repr +from rpython.rtyper.rint import IntegerRepr +from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods @@ -473,6 +475,11 @@ # overriding rtype_mod() below return r_str.ll.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) +class __extend__(pairtype(AbstractStringRepr, FloatRepr)): + def rtype_mod(_, hop): + from rpython.rtyper.lltypesystem.rstr import do_stringformat + return do_stringformat(hop, 
[(hop.args_v[1], hop.args_r[1])]) + class __extend__(pairtype(AbstractStringRepr, IntegerRepr)): def rtype_getitem((r_str, r_int), hop, checkidx=False): diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -9,8 +9,9 @@ Void, Signed, Bool, Ptr, GcStruct, malloc, typeOf, nullptr) from rpython.rtyper.lltypesystem.rstr import LLHelpers from rpython.rtyper.rstr import AbstractStringRepr -from rpython.rtyper.rmodel import (Repr, IntegerRepr, inputconst, IteratorRepr, +from rpython.rtyper.rmodel import (Repr, inputconst, IteratorRepr, externalvsinternal) +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype From noreply at buildbot.pypy.org Tue May 13 22:07:57 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 May 2014 22:07:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140513200757.6C0EF1D2D24@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71497:7cdf492553dd Date: 2014-05-13 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/7cdf492553dd/ Log: merge heads diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,7 +522,7 @@ # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations - 'bextr', + 'andn', 'bextr', 'blsi', 'blsmask', 'blsr', 'tzcnt', 'lzcnt', ]) # a partial list is hopefully good enough for now; it's all to support From noreply at buildbot.pypy.org Wed May 14 01:27:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 01:27:44 +0200 (CEST) Subject: [pypy-commit] pypy default: make hop.nb_args a property Message-ID: <20140513232744.43D141D236E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71498:e0f339dd838e Date: 2014-05-14 00:26 +0100 
http://bitbucket.org/pypy/pypy/changeset/e0f339dd838e/ Log: make hop.nb_args a property diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -54,7 +54,6 @@ from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") - hop.nb_args -= 1 v_tuple = hop.args_v.pop() s_tuple = hop.args_s.pop() r_tuple = hop.args_r.pop() @@ -62,7 +61,6 @@ raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) - hop.nb_args += 1 hop.args_v.append(v_item) hop.args_s.append(s_tuple.items[i]) hop.args_r.append(r_tuple.items_r[i]) @@ -177,7 +175,7 @@ result.append(hop.inputarg(r, arg=i)) else: result.append(None) - hop.nb_args -= len(lst) + del hop.args_v[hop.nb_args - len(lst):] return result def get_builtin_method_self(x): @@ -367,8 +365,7 @@ (i_zero, None), (i_track_allocation, None), (i_add_memory_pressure, None)) - (v_flavor, v_zero, v_track_allocation, - v_add_memory_pressure) = kwds_v + (v_flavor, v_zero, v_track_allocation, v_add_memory_pressure) = kwds_v flags = {'flavor': 'gc'} if v_flavor is not None: flags['flavor'] = v_flavor.value diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -661,7 +661,6 @@ def setup(self): rtyper = self.rtyper spaceop = self.spaceop - self.nb_args = len(spaceop.args) self.args_v = list(spaceop.args) self.args_s = [rtyper.binding(a) for a in spaceop.args] self.s_result = rtyper.binding(spaceop.result) @@ -669,6 +668,10 @@ self.r_result = rtyper.getrepr(self.s_result) rtyper.call_all_setups() # compute ForwardReferences now + @property + def nb_args(self): + return len(self.args_v) + def copy(self): result = HighLevelOp(self.rtyper, self.spaceop, self.exceptionlinks, self.llops) @@ -726,7 +729,6 @@ def r_s_pop(self, index=-1): "Return and discard 
the argument with index position." - self.nb_args -= 1 self.args_v.pop(index) return self.args_r.pop(index), self.args_s.pop(index) @@ -739,7 +741,6 @@ self.args_v.insert(0, v_newfirstarg) self.args_r.insert(0, r_newfirstarg) self.args_s.insert(0, s_newfirstarg) - self.nb_args += 1 def swap_fst_snd_args(self): self.args_v[0], self.args_v[1] = self.args_v[1], self.args_v[0] From noreply at buildbot.pypy.org Wed May 14 02:05:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 May 2014 02:05:30 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: issue1764 Message-ID: <20140514000530.50A201C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71499:b3774dd44ffc Date: 2014-05-12 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/b3774dd44ffc/ Log: issue1764 Test and fix (grafted from cae86999e6c850e5941d326ce5cddb7e85c6081e) diff --git a/rpython/translator/c/gcc/instruction.py b/rpython/translator/c/gcc/instruction.py --- a/rpython/translator/c/gcc/instruction.py +++ b/rpython/translator/c/gcc/instruction.py @@ -184,6 +184,9 @@ def __init__(self): self.delta = -7.25 # use this non-integer value as a marker +class InsnPushed(InsnStackAdjust): + pass + class InsnStop(Insn): _args_ = ['reason'] def __init__(self, reason='?'): diff --git a/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s b/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s new file mode 100644 --- /dev/null +++ b/rpython/translator/c/gcc/test/elf64/track_random_rsp_rbp.s @@ -0,0 +1,158 @@ + .type seterror.part.1, @function +seterror.part.1: +.LFB77: + .cfi_startproc + pushq %r14 + .cfi_def_cfa_offset 16 + .cfi_offset 14, -16 + pushq %r13 + .cfi_def_cfa_offset 24 + .cfi_offset 13, -24 + pushq %r12 + .cfi_def_cfa_offset 32 + .cfi_offset 12, -32 + pushq %rbp + .cfi_def_cfa_offset 40 + .cfi_offset 6, -40 + pushq %rbx + .cfi_def_cfa_offset 48 + .cfi_offset 3, -48 + subq $512, %rsp + .cfi_def_cfa_offset 560 + testq %r8, %r8 + je .L30 +.L11: + 
movq PyPyExc_TypeError at GOTPCREL(%rip), %rax + movq %r8, %rsi + movq (%rax), %rdi + call PyPyErr_SetString at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq $512, %rsp + .cfi_remember_state + .cfi_def_cfa_offset 48 + popq %rbx + .cfi_def_cfa_offset 40 + popq %rbp + .cfi_def_cfa_offset 32 + popq %r12 + .cfi_def_cfa_offset 24 + popq %r13 + .cfi_def_cfa_offset 16 + popq %r14 + .cfi_def_cfa_offset 8 + ret + .p2align 4,,10 + .p2align 3 +.L30: + .cfi_restore_state + testq %rcx, %rcx + movq %rsi, %r12 + movl %edi, %r14d + movq %rdx, %r13 + movq %rsp, %rbp + movl $512, %esi + movq %rsp, %rbx + je .L13 + leaq .LC6(%rip), %rdx + movl $512, %esi + movq %rsp, %rdi + xorl %eax, %eax + movq %rsp, %rbx + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } +.L14: + movl (%rbx), %eax + addq $4, %rbx + leal -16843009(%rax), %esi + notl %eax + andl %eax, %esi + andl $-2139062144, %esi + je .L14 + movl %esi, %eax + shrl $16, %eax + testl $32896, %esi + cmove %eax, %esi + leaq 2(%rbx), %rax + cmove %rax, %rbx + addb %sil, %sil + movq %rbp, %rsi + sbbq $3, %rbx + subq %rbx, %rsi + addq $512, %rsi +.L13: + testl %r14d, %r14d + je .L16 + leaq .LC7(%rip), %rdx + movq %rbx, %rdi + movl %r14d, %ecx + xorl %eax, %eax + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx + movl 0(%r13), %eax + testl %eax, %eax + jle .L18 + movq %rbx, %rdx + subq %rbp, %rdx + cmpl $219, %edx + jg .L18 + addq $4, %r13 + xorl %r14d, %r14d + .p2align 4,,10 + .p2align 3 +.L21: + movq %rbp, %rsi + leal -1(%rax), %ecx + leaq .LC8(%rip), %rdx + subq %rbx, %rsi + movq %rbx, %rdi + xorl %eax, %eax + addq $512, %rsi + addl $1, %r14d + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 
512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx + movl 0(%r13), %eax + testl %eax, %eax + jle .L18 + cmpl $32, %r14d + je .L18 + movq %rbx, %rdx + addq $4, %r13 + subq %rbp, %rdx + cmpl $219, %edx + jle .L21 + jmp .L18 + .p2align 4,,10 + .p2align 3 +.L16: + leaq .LC9(%rip), %rdx + movq %rbx, %rdi + xorl %eax, %eax + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbx, %rdi + call strlen at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + addq %rax, %rbx +.L18: + movq %rbp, %rsi + leaq .LC10(%rip), %rdx + movq %r12, %rcx + subq %rbx, %rsi + movq %rbx, %rdi + xorl %eax, %eax + addq $512, %rsi + call PyPyOS_snprintf at PLT + ;; expected {552(%rsp) | 512(%rsp), 528(%rsp), 536(%rsp), 544(%rsp), %r15, 520(%rsp) | } + movq %rbp, %r8 + jmp .L11 + .cfi_endproc +.LFE77: + .size seterror.part.1, .-seterror.part.1 diff --git a/rpython/translator/c/gcc/test/test_trackgcroot.py b/rpython/translator/c/gcc/test/test_trackgcroot.py --- a/rpython/translator/c/gcc/test/test_trackgcroot.py +++ b/rpython/translator/c/gcc/test/test_trackgcroot.py @@ -130,7 +130,7 @@ elif format == 'darwin' or format == 'darwin64': py.test.skip("disabled on OS/X's terribly old gcc") else: - r_globallabel = re.compile(r"([\w]+)=[.]+") + r_globallabel = re.compile(r"([\w.]+)=[.]+") print print path.dirpath().basename + '/' + path.basename lines = path.readlines() diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -8,7 +8,7 @@ from rpython.translator.c.gcc.instruction import InsnSetLocal, InsnCopyLocal from rpython.translator.c.gcc.instruction import InsnPrologue, InsnEpilogue from 
rpython.translator.c.gcc.instruction import InsnGCROOT, InsnCondJump -from rpython.translator.c.gcc.instruction import InsnStackAdjust +from rpython.translator.c.gcc.instruction import InsnStackAdjust, InsnPushed from rpython.translator.c.gcc.instruction import InsnCannotFollowEsp from rpython.translator.c.gcc.instruction import LocalVar, somenewvalue from rpython.translator.c.gcc.instruction import frameloc_esp, frameloc_ebp @@ -665,14 +665,22 @@ match = self.r_unaryinsn.match(line) source = match.group(1) return self.insns_for_copy(source, self.TOP_OF_STACK_MINUS_WORD) + \ - [InsnStackAdjust(-self.WORD)] + [InsnPushed(-self.WORD)] def _visit_pop(self, target): return [InsnStackAdjust(+self.WORD)] + \ self.insns_for_copy(self.TOP_OF_STACK_MINUS_WORD, target) def _visit_prologue(self): - # for the prologue of functions that use %ebp as frame pointer + # For the prologue of functions that use %ebp as frame pointer. + # First, find the latest InsnStackAdjust; if it's not a PUSH, + # then consider that this 'mov %rsp, %rbp' is actually unrelated + i = -1 + while not isinstance(self.insns[i], InsnStackAdjust): + i -= 1 + if not isinstance(self.insns[i], InsnPushed): + return [] + # self.uses_frame_pointer = True self.r_localvar = self.r_localvarfp return [InsnPrologue(self.WORD)] From noreply at buildbot.pypy.org Wed May 14 02:05:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 May 2014 02:05:31 +0200 (CEST) Subject: [pypy-commit] pypy default: redo irc_topic and tests Message-ID: <20140514000531.8256F1C02D8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71500:4c48d672074a Date: 2014-05-14 03:04 +0300 http://bitbucket.org/pypy/pypy/changeset/4c48d672074a/ Log: redo irc_topic and tests diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -671,8 +671,9 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - irc_topic = 
sys.version_info[3] != 'final' or ( - readenv and os.getenv('PYPY_IRC_TOPIC')) + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' or ( + readenv and os.getenv('PYPY_IRC_TOPIC')) success = run_toplevel(interactive_console, mainmodule, quiet=not irc_topic) except SystemExit, e: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,12 +7,8 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir -from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = PYPY_VERSION[3] == "final" - - banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -246,10 +242,6 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt - if is_release: - assert irc_header not in child.before - else: - assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') @@ -269,11 +261,22 @@ child.sendline("'' in sys.path") child.expect("True") - def test_irc_topic(self, monkeypatch): + def test_yes_irc_topic(self, monkeypatch): monkeypatch.setenv('PYPY_IRC_TOPIC', '1') child = self.spawn([]) child.expect(irc_header) # banner + def test_maybe_irc_topic(self): + import sys + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' + child = self.spawn([]) + child.expect('>>>') # banner + if irc_topic: + assert irc_header in child.before + else: + assert irc_header not in child.before + def test_help(self): # test that -h prints the usage, including the name of the executable # which should be /full/path/to/app_main.py in this case @@ -934,6 +937,7 @@ # 
---------------------------------------- from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION cpy_ver = '%d.%d' % CPYTHON_VERSION[:2] + from lib_pypy._pypy_interact import irc_header goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and @@ -953,6 +957,7 @@ self.w_fake_exe = self.space.wrap(str(fake_exe)) self.w_expected_path = self.space.wrap(expected_path) self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final") self.w_tmp_dir = self.space.wrap(tmp_dir) @@ -1022,3 +1027,4 @@ # assert it did not crash finally: sys.path[:] = old_sys_path + From noreply at buildbot.pypy.org Wed May 14 02:22:41 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 02:22:41 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: Create HopArg class to bundle together a Constant-or-Variable, a Some and a Repr Message-ID: <20140514002241.39B971C0EE4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: r71501:e4b87a851b03 Date: 2014-05-14 01:21 +0100 http://bitbucket.org/pypy/pypy/changeset/e4b87a851b03/ Log: Create HopArg class to bundle together a Constant-or-Variable, a Some and a Repr diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -648,6 +648,22 @@ # ____________________________________________________________ +class HopArg(object): + # XXX: find a better name and a meaningful definition + def __init__(self, v, s, r, llops): + self.v = v # a Constant or Variable + self.s = s # an annotation + self.r = r # a repr + self.llops = llops + + def convert_to(self, repr): + v = self.v + if isinstance(v, Constant): + return inputconst(repr, v.value) + if self.s.is_constant(): + return inputconst(repr, self.s.const) + return self.llops.convertvar(v, self.r, repr) + class HighLevelOp(object): forced_opname = None @@ -672,6 +688,11 @@ def 
nb_args(self): return len(self.args_v) + @property + def args(self): + return [HopArg(v, s, r, self.llops) for v, s, r in + zip(self.args_v, self.args_s, self.args_r)] + def copy(self): result = HighLevelOp(self.rtyper, self.spaceop, self.exceptionlinks, self.llops) @@ -697,17 +718,7 @@ """ if not isinstance(converted_to, Repr): converted_to = self.rtyper.getprimitiverepr(converted_to) - v = self.args_v[arg] - if isinstance(v, Constant): - return inputconst(converted_to, v.value) - assert hasattr(v, 'concretetype') - - s_binding = self.args_s[arg] - if s_binding.is_constant(): - return inputconst(converted_to, s_binding.const) - - r_binding = self.args_r[arg] - return self.llops.convertvar(v, r_binding, converted_to) + return self.args[arg].convert_to(converted_to) inputconst = staticmethod(inputconst) # export via the HighLevelOp class @@ -717,8 +728,10 @@ "'%s' has %d arguments, rtyper wants %d" % ( self.spaceop.opname, self.nb_args, len(converted_to))) vars = [] - for i in range(len(converted_to)): - vars.append(self.inputarg(converted_to[i], i)) + for arg, repr in zip(self.args, converted_to): + if not isinstance(repr, Repr): + repr = self.rtyper.getprimitiverepr(repr) + vars.append(arg.convert_to(repr)) return vars def genop(self, opname, args_v, resulttype=None): From noreply at buildbot.pypy.org Wed May 14 03:30:38 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 03:30:38 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: don't store llops in HopArg Message-ID: <20140514013038.23B3B1C0320@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: r71502:c195d6f7fb54 Date: 2014-05-14 02:29 +0100 http://bitbucket.org/pypy/pypy/changeset/c195d6f7fb54/ Log: don't store llops in HopArg diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -650,19 +650,18 @@ class HopArg(object): # XXX: find a better name and a meaningful definition - def __init__(self, 
v, s, r, llops): + def __init__(self, v, s, r): self.v = v # a Constant or Variable self.s = s # an annotation self.r = r # a repr - self.llops = llops - def convert_to(self, repr): + def convert_to(self, repr, llops): v = self.v if isinstance(v, Constant): return inputconst(repr, v.value) if self.s.is_constant(): return inputconst(repr, self.s.const) - return self.llops.convertvar(v, self.r, repr) + return llops.convertvar(v, self.r, repr) class HighLevelOp(object): @@ -690,7 +689,7 @@ @property def args(self): - return [HopArg(v, s, r, self.llops) for v, s, r in + return [HopArg(v, s, r) for v, s, r in zip(self.args_v, self.args_s, self.args_r)] def copy(self): @@ -718,7 +717,7 @@ """ if not isinstance(converted_to, Repr): converted_to = self.rtyper.getprimitiverepr(converted_to) - return self.args[arg].convert_to(converted_to) + return self.args[arg].convert_to(converted_to, self.llops) inputconst = staticmethod(inputconst) # export via the HighLevelOp class @@ -731,7 +730,7 @@ for arg, repr in zip(self.args, converted_to): if not isinstance(repr, Repr): repr = self.rtyper.getprimitiverepr(repr) - vars.append(arg.convert_to(repr)) + vars.append(arg.convert_to(repr, self.llops)) return vars def genop(self, opname, args_v, resulttype=None): From noreply at buildbot.pypy.org Wed May 14 05:08:17 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 05:08:17 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: store HighLevelOp data as a list of HopArgs Message-ID: <20140514030817.759481C02D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: r71503:d8d5f7833dcc Date: 2014-05-14 04:07 +0100 http://bitbucket.org/pypy/pypy/changeset/d8d5f7833dcc/ Log: store HighLevelOp data as a list of HopArgs diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,6 +1,7 @@ from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant 
from rpython.rlib import rarithmetic, objectmodel +from rpython.rtyper.rtyper import HopArg from rpython.rtyper import raddress, rptr, extregistry, rrange from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype, llmemory, rclass @@ -47,23 +48,19 @@ hop = hop.copy() from rpython.annotator.argument import ArgumentsForTranslation arguments = ArgumentsForTranslation.fromshape( - hop.args_s[1].const, # shape + hop.args[1].s.const, # shape range(hop.nb_args-2)) if arguments.w_stararg is not None: # expand the *arg in-place -- it must be a tuple from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") - v_tuple = hop.args_v.pop() - s_tuple = hop.args_s.pop() - r_tuple = hop.args_r.pop() - if not isinstance(r_tuple, TupleRepr): + tup = hop.args.pop() + if not isinstance(tup.r, TupleRepr): raise TyperError("*arg must be a tuple") - for i in range(len(r_tuple.items_r)): - v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) - hop.args_v.append(v_item) - hop.args_s.append(s_tuple.items[i]) - hop.args_r.append(r_tuple.items_r[i]) + for i in range(len(tup.r.items_r)): + v_item = tup.r.getitem_internal(hop.llops, tup.v, i) + hop.args.append(HopArg(v_item, tup.s.items[i], tup.r.items_r[i])) keywords = arguments.keywords if not takes_kwds and keywords: @@ -143,13 +140,15 @@ self.self_repr.__class__.__name__, name)) # hack based on the fact that 'lowleveltype == self_repr.lowleveltype' hop2 = hop.copy() - assert hop2.args_r[0] is self - if isinstance(hop2.args_v[0], Constant): - c = hop2.args_v[0].value # get object from bound method + assert hop2.args[0].r is self + if isinstance(hop2.args[0].v, Constant): + c = hop2.args[0].v.value # get object from bound method c = get_builtin_method_self(c) - hop2.args_v[0] = Constant(c) - hop2.args_s[0] = self.s_self - hop2.args_r[0] = self.self_repr + v_self = Constant(c) + else: + v_self = hop2.args[0].v + h_self = HopArg(v_self, 
self.s_self, self.self_repr) + hop2.args[0] = h_self return bltintyper(hop2) class __extend__(pairtype(BuiltinMethodRepr, BuiltinMethodRepr)): @@ -175,7 +174,7 @@ result.append(hop.inputarg(r, arg=i)) else: result.append(None) - del hop.args_v[hop.nb_args - len(lst):] + del hop.args[hop.nb_args - len(lst):] return result def get_builtin_method_self(x): @@ -316,8 +315,9 @@ for i in range(len(new_args_r)): assert hop.args_r[i].lowleveltype == new_args_r[i].lowleveltype - hop.args_r = new_args_r - hop.args_s = [s_callable] + args_s + new_args = [HopArg(v, s, r) for v, s, r in + zip(hop.args_v, [s_callable] + args_s, new_args_r)] + hop.args = new_args hop.s_result = s_ret assert hop.r_result.lowleveltype == rresult.lowleveltype diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -390,6 +390,7 @@ raise NotImplementedError def _emulate_call(self, hop, meth_name): + from rpython.rtyper.rtyper import HopArg vinst, = hop.inputargs(self) clsdef = hop.args_s[0].classdef s_unbound_attr = clsdef.find_attribute(meth_name).getvalue() @@ -402,10 +403,10 @@ r_method = self.rtyper.getrepr(s_attr) r_method.get_method_from_instance(self, vinst, hop.llops) hop2 = hop.copy() - hop2.spaceop = op.simple_call(hop.spaceop.args[0]) + v = hop.args[0].v + hop2.spaceop = op.simple_call(v) hop2.spaceop.result = hop.spaceop.result - hop2.args_r = [r_method] - hop2.args_s = [s_attr] + hop2.args = [HopArg(v, s_attr, r_method)] return hop2.dispatch() def rtype_iter(self, hop): diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -1,5 +1,6 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError +from rpython.rtyper.rtyper import HopArg from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -58,11 +59,11 @@ raise TyperError("args_r[%d] = %r, 
expected ControlledInstanceRepr" % (index, r_controlled)) s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj - hop2.args_s[index], hop2.args_r[index] = s_new, r_new v = hop2.args_v[index] if isinstance(v, Constant): real_value = r_controlled.controller.convert(v.value) - hop2.args_v[index] = Constant(real_value) + v = Constant(real_value) + hop2.args[index] = HopArg(v, s_new, r_new) if revealresult: r_controlled = hop2.r_result if not isinstance(r_controlled, ControlledInstanceRepr): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -2,6 +2,7 @@ from rpython.annotator import model as annmodel, description from rpython.flowspace.model import Constant +from rpython.rtyper.rtyper import HopArg from rpython.rtyper import rclass, callparse from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError @@ -563,11 +564,15 @@ # XXX obscure, try to refactor... s_function = annmodel.SomePBC([self.funcdesc]) hop2 = hop.copy() - hop2.args_s[0] = self.s_im_self # make the 1st arg stand for 'im_self' - hop2.args_r[0] = self.r_im_self # (same lowleveltype as 'self') + + # make the 1st arg stand for 'im_self' if isinstance(hop2.args_v[0], Constant): boundmethod = hop2.args_v[0].value - hop2.args_v[0] = Constant(boundmethod.im_self) + v_im_self = Constant(boundmethod.im_self) + else: + v_im_self = hop2.args[0].v + hop2.args[0] = HopArg(v_im_self, self.s_im_self, self.r_im_self) + if call_args: hop2.swap_fst_snd_args() _, s_shape = hop2.r_s_popfirstarg() # temporarely remove shape @@ -854,8 +859,8 @@ def add_instance_arg_to_hop(self, hop, call_args): hop2 = hop.copy() - hop2.args_s[0] = self.s_im_self # make the 1st arg stand for 'im_self' - hop2.args_r[0] = self.r_im_self # (same lowleveltype as 'self') + hop2.args[0].s = self.s_im_self # make the 1st arg stand for 'im_self' + hop2.args[0].r = self.r_im_self # (same lowleveltype as 'self') if call_args: 
hop2.swap_fst_snd_args() diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -1,5 +1,6 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError +from rpython.rtyper.rtyper import HopArg from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr from rpython.rtyper.rlist import dum_nocheck, dum_checkidx from rpython.rtyper.rmodel import Repr, IteratorRepr @@ -207,7 +208,7 @@ v_enumerate, = hop.inputargs(self) v_index = hop.gendirectcall(self.ll_getnextindex, v_enumerate) hop2 = hop.copy() - hop2.args_r = [self.r_baseiter] + hop2.args = [HopArg(hop.args[0].v, hop.args[0].s, self.r_baseiter)] r_item_src = self.r_baseiter.external_item_repr r_item_dst = hop.r_result.items_r[1] v_item = self.r_baseiter.rtype_next(hop2) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -676,10 +676,12 @@ def setup(self): rtyper = self.rtyper spaceop = self.spaceop - self.args_v = list(spaceop.args) - self.args_s = [rtyper.binding(a) for a in spaceop.args] + args_v = spaceop.args + args_s = [rtyper.binding(a) for a in args_v] + args_r = [rtyper.getrepr(s_a) for s_a in args_s] + self.args = [HopArg(v, s, r) for v, s, r in + zip(args_v, args_s, args_r)] self.s_result = rtyper.binding(spaceop.result) - self.args_r = [rtyper.getrepr(s_a) for s_a in self.args_s] self.r_result = rtyper.getrepr(self.s_result) rtyper.call_all_setups() # compute ForwardReferences now @@ -688,9 +690,16 @@ return len(self.args_v) @property - def args(self): - return [HopArg(v, s, r) for v, s, r in - zip(self.args_v, self.args_s, self.args_r)] + def args_v(self): + return [arg.v for arg in self.args] + + @property + def args_s(self): + return [arg.s for arg in self.args] + + @property + def args_r(self): + return [arg.r for arg in self.args] def copy(self): result = HighLevelOp(self.rtyper, self.spaceop, @@ -741,8 +750,8 
@@ def r_s_pop(self, index=-1): "Return and discard the argument with index position." - self.args_v.pop(index) - return self.args_r.pop(index), self.args_s.pop(index) + arg = self.args.pop(index) + return arg.r, arg.s def r_s_popfirstarg(self): "Return and discard the first argument." @@ -750,14 +759,11 @@ def v_s_insertfirstarg(self, v_newfirstarg, s_newfirstarg): r_newfirstarg = self.rtyper.getrepr(s_newfirstarg) - self.args_v.insert(0, v_newfirstarg) - self.args_r.insert(0, r_newfirstarg) - self.args_s.insert(0, s_newfirstarg) + newarg = HopArg(v_newfirstarg, s_newfirstarg, r_newfirstarg) + self.args.insert(0, newarg) def swap_fst_snd_args(self): - self.args_v[0], self.args_v[1] = self.args_v[1], self.args_v[0] - self.args_s[0], self.args_s[1] = self.args_s[1], self.args_s[0] - self.args_r[0], self.args_r[1] = self.args_r[1], self.args_r[0] + self.args[0], self.args[1] = self.args[1], self.args[0] def has_implicit_exception(self, exc_cls): if self.llops.llop_raising_exceptions is not None: From noreply at buildbot.pypy.org Wed May 14 12:40:57 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 May 2014 12:40:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: try to do less unneeded transaction breaks Message-ID: <20140514104057.5A8F51C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71504:8335fe017bcc Date: 2014-05-14 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/8335fe017bcc/ Log: try to do less unneeded transaction breaks diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -43,7 +43,7 @@ #define LOW_FILL_MARK 400000 -static long pypy_transaction_length; +static long pypy_transaction_length = NURSERY_SIZE * 3 / 4; void pypy_stm_set_transaction_length(double fraction) @@ -158,7 +158,7 @@ while (1) { long counter; - if (pypy_stm_ready_atomic == 1) { + if 
(pypy_stm_should_break_transaction()) { //pypy_stm_ready_atomic == 1) { /* Not in an atomic transaction; but it might be an inevitable transaction. */ @@ -193,22 +193,12 @@ if (STM_SEGMENT->jmpbuf_ptr == &jmpbuf) { /* we can't leave this function leaving a non-inevitable - transaction whose jmpbuf points into this function + transaction whose jmpbuf points into this function. + we could break the transaction here but we instead rely + on the caller to break it. Since we have to use an inevitable + transaction anyway, using the current one may be cheaper. */ - if (pypy_stm_ready_atomic == 1) { - //assert(pypy_stm_nursery_low_fill_mark != 0); - assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - stm_commit_transaction(); - - stm_start_inevitable_transaction(&stm_thread_local); - _pypy_stm_initialize_nursery_low_fill_mark(0); - _pypy_stm_inev_state(); - } - else { - assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - pypy_stm_nursery_low_fill_mark_saved = 0; - _stm_become_inevitable("perform_transaction left with atomic"); - } + _stm_become_inevitable("perform_transaction left with inevitable"); } /* double-check */ if (pypy_stm_ready_atomic == 1) { From noreply at buildbot.pypy.org Wed May 14 14:17:41 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 May 2014 14:17:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: hacky way to set the transaction length to unlimited when a program never uses threads in the case of no-jit too Message-ID: <20140514121741.0D9311C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71505:a140ce520d35 Date: 2014-05-14 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/a140ce520d35/ Log: hacky way to set the transaction length to unlimited when a program never uses threads in the case of no-jit too diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ 
b/rpython/translator/stm/src_stm/stmgcintf.c @@ -27,8 +27,11 @@ { if (pypy_stm_nursery_low_fill_mark == (uintptr_t)-1) { /* atomic */ - pypy_stm_nursery_low_fill_mark_saved = 0; - } else { + if (((long)pypy_stm_nursery_low_fill_mark_saved) > 0) { + pypy_stm_nursery_low_fill_mark_saved = 0; + } + } else if (((long)pypy_stm_nursery_low_fill_mark) > 0) { + /* if not set to unlimited by pypy_stm_setup() (s.b.) */ pypy_stm_nursery_low_fill_mark = 0; } } @@ -43,7 +46,7 @@ #define LOW_FILL_MARK 400000 -static long pypy_transaction_length = NURSERY_SIZE * 3 / 4; +static long pypy_transaction_length; void pypy_stm_set_transaction_length(double fraction) @@ -51,7 +54,7 @@ /* the value '1.0' means 'use the default'. Other values are interpreted proportionally, up to some maximum. */ long low_fill_mark = (long)(LOW_FILL_MARK * fraction); - if (low_fill_mark > NURSERY_SIZE * 3 / 4) + if (low_fill_mark > (long)(NURSERY_SIZE * 3 / 4)) low_fill_mark = NURSERY_SIZE * 3 / 4; pypy_transaction_length = low_fill_mark; } @@ -61,7 +64,10 @@ stm_setup(); pypy_stm_register_thread_local(); pypy_stm_ready_atomic = 1; - pypy_stm_set_transaction_length(1.0); + /* set transaction length to unlimited until the first thread + starts. pypy_stm_set_transaction_length will then be called + again by pypy. */ + pypy_stm_set_transaction_length(-10000.0); pypy_stm_start_inevitable_if_not_atomic(); } @@ -69,6 +75,7 @@ { if (pypy_stm_ready_atomic == 0) { /* first time we see this thread */ + assert(pypy_transaction_length >= 0); int e = errno; pypy_stm_register_thread_local(); errno = e; @@ -218,16 +225,16 @@ /* Reduce the limit so that inevitable transactions are generally shorter. We depend a bit on stmcb_commit_soon() in order for other transactions to signal us in case we block them. 
*/ - uintptr_t t; + long t; if (pypy_stm_ready_atomic == 1) { - t = pypy_stm_nursery_low_fill_mark; - t = _stm_nursery_start + ((t - _stm_nursery_start) >> 2); + t = (long)pypy_stm_nursery_low_fill_mark; + t = _stm_nursery_start + ((t - (long)_stm_nursery_start) >> 2); pypy_stm_nursery_low_fill_mark = t; } else { assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - t = pypy_stm_nursery_low_fill_mark_saved; - t = _stm_nursery_start + ((t - _stm_nursery_start) >> 2); + t = (long)pypy_stm_nursery_low_fill_mark_saved; + t = _stm_nursery_start + ((t - (long)_stm_nursery_start) >> 2); pypy_stm_nursery_low_fill_mark_saved = t; } } @@ -243,6 +250,8 @@ void pypy_stm_become_globally_unique_transaction(void) { - _pypy_stm_inev_state(); + if (STM_SEGMENT->jmpbuf_ptr != NULL) { + _pypy_stm_inev_state(); + } stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); } From noreply at buildbot.pypy.org Wed May 14 14:51:00 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:00 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: two-line trace output for process switches Message-ID: <20140514125100.6CEC41C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r814:ed9b4d7d02a1 Date: 2014-05-12 10:45 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ed9b4d7d02a1/ Log: two-line trace output for process switches diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -84,7 +84,8 @@ s_new_context.push(nlr.value) except ProcessSwitch, p: if self.trace: - print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) + print "====== Switched process from: %s" % s_new_context.short_str() + print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context def loop_bytecodes(self, s_context, may_context_switch=True): From noreply at buildbot.pypy.org Wed May 14 14:51:01 2014 
From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:01 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Renamed getbytecode() to fetch_next_bytecode() Message-ID: <20140514125101.A316D1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r815:b398a1500d52 Date: 2014-05-12 12:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b398a1500d52/ Log: Renamed getbytecode() to fetch_next_bytecode() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -469,7 +469,7 @@ def extendedVariableTypeAndIndex(self): # AK please explain this method (a helper, I guess) - descriptor = self.getbytecode() + descriptor = self.fetch_next_bytecode() return ((descriptor >> 6) & 3), (descriptor & 63) def extendedPushBytecode(self, interp, current_bytecode): @@ -505,7 +505,7 @@ self.pop() def getExtendedSelectorArgcount(self): - descriptor = self.getbytecode() + descriptor = self.fetch_next_bytecode() return ((self.w_method().getliteral(descriptor & 31)), (descriptor >> 5)) @@ -515,8 +515,8 @@ def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode): from spyvm import error - second = self.getbytecode() - third = self.getbytecode() + second = self.fetch_next_bytecode() + third = self.fetch_next_bytecode() opType = second >> 5 if opType == 0: # selfsend @@ -557,7 +557,7 @@ return self._sendSuperSelector(w_selector, argcount, interp) def secondExtendedSendBytecode(self, interp, current_bytecode): - descriptor = self.getbytecode() + descriptor = self.fetch_next_bytecode() w_selector = self.w_method().getliteral(descriptor & 63) argcount = descriptor >> 6 return self._sendSelfSelector(w_selector, argcount, interp) @@ -567,7 +567,7 @@ # closure bytecodes def pushNewArrayBytecode(self, interp, current_bytecode): - arraySize, popIntoArray = splitter[7, 1](self.getbytecode()) + arraySize, popIntoArray = splitter[7, 1](self.fetch_next_bytecode()) 
newArray = None if popIntoArray == 1: newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) @@ -579,8 +579,8 @@ raise MissingBytecode("experimentalBytecode") def _extract_index_and_temps(self): - index_in_array = self.getbytecode() - index_of_array = self.getbytecode() + index_in_array = self.fetch_next_bytecode() + index_of_array = self.fetch_next_bytecode() w_indirectTemps = self.gettemp(index_of_array) return index_in_array, w_indirectTemps @@ -618,9 +618,9 @@ self jump: blockSize """ space = self.space - numArgs, numCopied = splitter[4, 4](self.getbytecode()) - j = self.getbytecode() - i = self.getbytecode() + numArgs, numCopied = splitter[4, 4](self.fetch_next_bytecode()) + j = self.fetch_next_bytecode() + i = self.fetch_next_bytecode() blockSize = (j << 8) | i #create new instance of BlockClosure w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, @@ -657,10 +657,10 @@ self.jumpConditional(interp, False, self.shortJumpPosition(current_bytecode)) def longUnconditionalJump(self, interp, current_bytecode): - self.jump((((current_bytecode & 7) - 4) << 8) + self.getbytecode()) + self.jump((((current_bytecode & 7) - 4) << 8) + self.fetch_next_bytecode()) def longJumpPosition(self, current_bytecode): - return ((current_bytecode & 3) << 8) + self.getbytecode() + return ((current_bytecode & 3) << 8) + self.fetch_next_bytecode() def longJumpIfTrue(self, interp, current_bytecode): self.jumpConditional(interp, True, self.longJumpPosition(current_bytecode)) @@ -852,7 +852,7 @@ from rpython.rlib.unroll import unrolling_iterable unrolling_ranges = unrolling_iterable(BYTECODE_RANGES) def bytecode_step_translated(self, context): - bytecode = context.getbytecode() + bytecode = context.fetch_next_bytecode() for entry in unrolling_ranges: if len(entry) == 2: bc, methname = entry diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1275,7 +1275,7 @@ return 16 + self.islarge * 40 + self.argsize 
@constant_for_version_arg - def getbytecode(self, pc): + def fetch_next_bytecode(self, pc): assert pc >= 0 and pc < len(self.bytes) return self.bytes[pc] diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -794,10 +794,10 @@ # XXX this is incorrect when there is subclassing return self._w_self_size - def getbytecode(self): + def fetch_next_bytecode(self): jit.promote(self._pc) assert self._pc >= 0 - bytecode = self.w_method().getbytecode(self._pc) + bytecode = self.w_method().fetch_next_bytecode(self._pc) currentBytecode = ord(bytecode) self._pc += 1 return currentBytecode diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -118,9 +118,9 @@ assert s_frame.gettemp(2).is_nil(space) s_frame.settemp(2, w("spam")) assert s_frame.gettemp(2).as_string() == "spam" - assert s_frame.getbytecode() == ord("h") - assert s_frame.getbytecode() == ord("e") - assert s_frame.getbytecode() == ord("l") + assert s_frame.fetch_next_bytecode() == ord("h") + assert s_frame.fetch_next_bytecode() == ord("e") + assert s_frame.fetch_next_bytecode() == ord("l") def test_push_pop(): _, frame = new_frame("") diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -162,11 +162,11 @@ # Point over 2 literals of size 4 w_object = methodcontext(pc=13,method=w_m) s_object = w_object.as_methodcontext_get_shadow(space) - assert s_object.getbytecode() == 97 - assert s_object.getbytecode() == 98 - assert s_object.getbytecode() == 99 - assert s_object.getbytecode() == 100 - assert s_object.getbytecode() == 101 + assert s_object.fetch_next_bytecode() == 97 + assert s_object.fetch_next_bytecode() == 98 + assert s_object.fetch_next_bytecode() == 99 + assert s_object.fetch_next_bytecode() == 100 + assert s_object.fetch_next_bytecode() == 101 assert s_object.s_home() == s_object def 
assert_contains_nils(w_obj): From noreply at buildbot.pypy.org Wed May 14 14:51:02 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:02 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Moved the step-method inside the interpreter like a regular method. Message-ID: <20140514125102.B6B2D1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r816:5654d57f2f04 Date: 2014-05-12 13:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/5654d57f2f04/ Log: Moved the step-method inside the interpreter like a regular method. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -126,7 +126,20 @@ self.loop_bytecodes(s_new_frame, may_context_switch) finally: self.remaining_stack_depth += 1 - + + def step(self, context): + bytecode = context.fetch_next_bytecode() + for entry in UNROLLING_BYTECODE_RANGES: + if len(entry) == 2: + bc, methname = entry + if bytecode == bc: + return getattr(context, methname)(self, bytecode) + else: + start, stop, methname = entry + if start <= bytecode <= stop: + return getattr(context, methname)(self, bytecode) + assert 0, "unreachable" + # ============== Methods for handling user interrupts ============== def jitted_check_for_interrupt(self, s_frame): @@ -820,6 +833,8 @@ (208, 255, "sendLiteralSelectorBytecode"), ] +from rpython.rlib.unroll import unrolling_iterable +UNROLLING_BYTECODE_RANGES = unrolling_iterable(BYTECODE_RANGES) def initialize_bytecode_names(): result = [None] * 256 @@ -849,23 +864,6 @@ # this table is only used for creating named bytecodes in tests and printing BYTECODE_TABLE = initialize_bytecode_table() -from rpython.rlib.unroll import unrolling_iterable -unrolling_ranges = unrolling_iterable(BYTECODE_RANGES) -def bytecode_step_translated(self, context): - bytecode = context.fetch_next_bytecode() - for entry in unrolling_ranges: - if len(entry) == 2: - bc, methname = entry - if bytecode 
== bc: - return getattr(context, methname)(self, bytecode) - else: - start, stop, methname = entry - if start <= bytecode <= stop: - return getattr(context, methname)(self, bytecode) - assert 0, "unreachable" - -Interpreter.step = bytecode_step_translated - # Smalltalk debugging facilities, patching Interpreter and ContextPartShadow # in order to enable tracing/jumping for message sends etc. def debugging(): From noreply at buildbot.pypy.org Wed May 14 14:51:03 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:03 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Improved printing of context objects (args/temps/stack). Message-ID: <20140514125103.C18DF1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r817:0283801bfc1e Date: 2014-05-14 13:01 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0283801bfc1e/ Log: Improved printing of context objects (args/temps/stack). Extracted fetch_bytecode from fetch_next_bytecode. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1275,7 +1275,7 @@ return 16 + self.islarge * 40 + self.argsize @constant_for_version_arg - def fetch_next_bytecode(self, pc): + def fetch_bytecode(self, pc): assert pc >= 0 and pc < len(self.bytes) return self.bytes[pc] @@ -1425,17 +1425,23 @@ def str_content(self): return self.get_identifier_string() + def bytecode_string(self, markBytecode=0): + from spyvm.interpreter import BYTECODE_TABLE + retval = "Bytecode:------------" + j = 1 + for i in self.bytes: + retval += '\n' + retval += '->' if j is markBytecode else ' ' + retval += ('%0.2i: 0x%0.2x(%0.3i) ' % (j, ord(i), ord(i))) + BYTECODE_TABLE[ord(i)].__name__ + j += 1 + retval += "\n---------------------" + return retval + def as_string(self, markBytecode=0): - from spyvm.interpreter import BYTECODE_TABLE - j = 1 retval = "\nMethodname: " + self.get_identifier_string() - retval += "\nBytecode:------------\n" - for i in self.bytes: - retval += '->' if j is markBytecode else ' ' - retval += ('%0.2i: 0x%0.2x(%0.3i) ' % (j ,ord(i), ord(i))) + BYTECODE_TABLE[ord(i)].__name__ + "\n" - j += 1 - return retval + "---------------------\n" - + retval += "\n%s" % self.bytecode_string(markBytecode) + return retval + def guess_containing_classname(self): w_class = self.compiled_in() if w_class and w_class.has_space(): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -795,12 +795,14 @@ return self._w_self_size def fetch_next_bytecode(self): - jit.promote(self._pc) - assert self._pc >= 0 - bytecode = self.w_method().fetch_next_bytecode(self._pc) - currentBytecode = ord(bytecode) + pc = jit.promote(self._pc) + assert pc >= 0 self._pc += 1 - return currentBytecode + return self.fetch_bytecode(pc) + + def fetch_bytecode(self, pc): + bytecode = self.w_method().fetch_bytecode(pc) + return ord(bytecode) # ______________________________________________________________________ # Temporary Variables @@ 
-902,20 +904,49 @@ # ______________________________________________________________________ # Printing + def __str__(self): + retval = self.short_str() + retval += "\n%s" % self.w_method().bytecode_string(markBytecode=self.pc() + 1) + retval += "\nArgs:----------------" + argcount = self.w_method().argsize + j = 0 + for w_obj in self._temps_and_stack[:self._stack_ptr]: + if j == argcount: + retval += "\nTemps:---------------" + if j == self.tempsize(): + retval += "\nStack:---------------" + retval += "\n %0.2i: %s" % (j, w_obj.as_repr_string()) + j += 1 + retval += "\n---------------------" + return retval + + def short_str(self): + arg_strings = self.argument_strings() + if len(arg_strings) > 0: + args = " , ".join(arg_strings) + args = " (%d arg(s): %s)" % (len(arg_strings), args) + else: + args = "" + return '%s [pc: %d] (rcvr: %s)%s' % ( + self.method_str(), + self.pc() + 1, + self.w_receiver().as_repr_string(), + args + ) + def print_stack(self, method=True): return self.print_padded_stack(method)[1] - + def print_padded_stack(self, method): padding = ret_str = '' if self.s_sender() is not None: - padding, ret_str = self.s_sender().print_padded_stack(method) + padding, ret_str = self.s_sender().print_padded_stack(method) if method: desc = self.method_str() else: desc = self.short_str() return padding + ' ', '%s\n%s%s' % (ret_str, padding, desc) - class BlockContextShadow(ContextPartShadow): _attrs_ = ['_w_home', '_initialip', '_eargc'] repr_classname = "BlockContextShadow" @@ -1034,15 +1065,11 @@ # === Printing === - def short_str(self): - return 'BlockContext of %s (%s) [%d]' % ( - self.w_method().get_identifier_string(), - self.w_receiver().as_repr_string(), - self.pc() + 1 - ) - + def argument_strings(self): + return [] + def method_str(self): - return '[] of %s' % self.w_method().get_identifier_string() + return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): _attrs_ = ['closure', '_w_receiver', 
'_w_method'] @@ -1192,35 +1219,17 @@ self.stack_put(index0, w_value) # === Printing === - - def __str__(self): - retval = '\nMethodContext of:' - retval += self.w_method().as_string(markBytecode=self.pc() + 1) - retval += "Stackptr: %d (this is an empty ascending stack with args and temps (%d), then stack)" % (self._stack_ptr, self.tempsize()) - retval += "\nStack : " + str(self._temps_and_stack[:self._stack_ptr]) - return retval - - def short_str(self): - method_str = self.method_str() - argcount = method_str.count(':') - if argcount == 0: - return '%s (rcvr: %s) [pc: %d]' % ( - method_str, - self.w_receiver().as_repr_string(), - self.pc() + 1 - ) - args = '%d' % argcount + + def argument_strings(self): + argcount = self.w_method().argsize + tempsize = self.w_method().tempsize() + args = [] for i in range(argcount): - args += ': %s' % self.peek(argcount -1 - i).as_repr_string() - return '%s (rcvr: %s) [pc: %d] (%s)' % ( - self.method_str(), - self.w_receiver().as_repr_string(), - self.pc() + 1, - args - ) + args.append(self.peek(tempsize - i - 1).as_repr_string()) + return args def method_str(self): - block = '[] of ' if self.is_closure_context() else '' + block = '[] in ' if self.is_closure_context() else '' return '%s%s' % (block, self.w_method().get_identifier_string()) class CachedObjectShadow(AbstractCachingShadow): From noreply at buildbot.pypy.org Wed May 14 14:51:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Appended *Bytecode to some bytecode names for consistency. Message-ID: <20140514125104.CC3141C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r818:05fd9bfcba9c Date: 2014-05-14 13:09 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/05fd9bfcba9c/ Log: Appended *Bytecode to some bytecode names for consistency. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -204,7 +204,7 @@ w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(arguments_w) <= 7 - w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethod + w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethodBytecode w_method.set_lookup_class_and_name(w_receiver.getclass(self.space), "Interpreter.perform") s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) s_frame.push(w_receiver) @@ -459,22 +459,22 @@ self.mark_returned() raise nlr - def returnReceiver(self, interp, current_bytecode): + def returnReceiverBytecode(self, interp, current_bytecode): return self._return(self.w_receiver(), interp, self.s_home().s_sender()) - def returnTrue(self, interp, current_bytecode): + def returnTrueBytecode(self, interp, current_bytecode): return self._return(interp.space.w_true, interp, self.s_home().s_sender()) - def returnFalse(self, interp, current_bytecode): + def returnFalseBytecode(self, interp, current_bytecode): return self._return(interp.space.w_false, interp, self.s_home().s_sender()) - def returnNil(self, interp, current_bytecode): + def returnNilBytecode(self, interp, current_bytecode): return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) - def returnTopFromMethod(self, interp, current_bytecode): + def returnTopFromMethodBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_home().s_sender()) - def returnTopFromBlock(self, interp, current_bytecode): + def returnTopFromBlockBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_sender()) def unknownBytecode(self, interp, current_bytecode): @@ -662,23 +662,23 @@ def shortJumpPosition(self, current_bytecode): return (current_bytecode & 7) + 1 - def 
shortUnconditionalJump(self, interp, current_bytecode): + def shortUnconditionalJumpBytecode(self, interp, current_bytecode): self.jump(self.shortJumpPosition(current_bytecode)) - def shortConditionalJump(self, interp, current_bytecode): + def shortConditionalJumpBytecode(self, interp, current_bytecode): # The conditional jump is "jump on false" self.jumpConditional(interp, False, self.shortJumpPosition(current_bytecode)) - def longUnconditionalJump(self, interp, current_bytecode): + def longUnconditionalJumpBytecode(self, interp, current_bytecode): self.jump((((current_bytecode & 7) - 4) << 8) + self.fetch_next_bytecode()) def longJumpPosition(self, current_bytecode): return ((current_bytecode & 3) << 8) + self.fetch_next_bytecode() - def longJumpIfTrue(self, interp, current_bytecode): + def longJumpIfTrueBytecode(self, interp, current_bytecode): self.jumpConditional(interp, True, self.longJumpPosition(current_bytecode)) - def longJumpIfFalse(self, interp, current_bytecode): + def longJumpIfFalseBytecode(self, interp, current_bytecode): self.jumpConditional(interp, False, self.longJumpPosition(current_bytecode)) bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) @@ -769,12 +769,12 @@ (117, "pushConstantZeroBytecode"), (118, "pushConstantOneBytecode"), (119, "pushConstantTwoBytecode"), - (120, "returnReceiver"), - (121, "returnTrue"), - (122, "returnFalse"), - (123, "returnNil"), - (124, "returnTopFromMethod"), - (125, "returnTopFromBlock"), + (120, "returnReceiverBytecode"), + (121, "returnTrueBytecode"), + (122, "returnFalseBytecode"), + (123, "returnNilBytecode"), + (124, "returnTopFromMethodBytecode"), + (125, "returnTopFromBlockBytecode"), (126, "unknownBytecode"), (127, "unknownBytecode"), (128, "extendedPushBytecode"), @@ -793,11 +793,11 @@ (141, "storeRemoteTempLongBytecode"), (142, "storeAndPopRemoteTempLongBytecode"), (143, "pushClosureCopyCopiedValuesBytecode"), - (144, 151, "shortUnconditionalJump"), - (152, 159, 
"shortConditionalJump"), - (160, 167, "longUnconditionalJump"), - (168, 171, "longJumpIfTrue"), - (172, 175, "longJumpIfFalse"), + (144, 151, "shortUnconditionalJumpBytecode"), + (152, 159, "shortConditionalJumpBytecode"), + (160, 167, "longUnconditionalJumpBytecode"), + (168, 171, "longJumpIfTrueBytecode"), + (172, 175, "longJumpIfFalseBytecode"), (176, "bytecodePrimAdd"), (177, "bytecodePrimSubtract"), (178, "bytecodePrimLessThan"), diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -122,7 +122,7 @@ # ===== These entry-points pre-load the image and directly execute a single frame. # func = preload_perform(imagename, model.W_SmallInteger(1000), 'loopTest2') # func = preload_perform(imagename, model.W_SmallInteger(777), 'name') - # func = preload_execute_frame(imagename, [returnReceiver], [], [model.W_SmallInteger(42)]) + # func = preload_execute_frame(imagename, [returnReceiverBytecodeBytecode], [], [model.W_SmallInteger(42)]) # ===== These execute the complete interpreter # ===== XXX These do not work because loading the image file while meta-interpreting always leads to diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -413,11 +413,11 @@ # w_object - the actual object we will be sending the method to # bytecodes - the bytecode to be executed def sendBytecodesTest(w_class, w_object, bytecodes): - for bytecode, result in [ (returnReceiver, w_object), - (returnTrue, space.w_true), - (returnFalse, space.w_false), - (returnNil, space.w_nil), - (returnTopFromMethod, space.w_one) ]: + for bytecode, result in [ (returnReceiverBytecode, w_object), + (returnTrueBytecode, space.w_true), + (returnFalseBytecode, space.w_false), + (returnNilBytecode, space.w_nil), + (returnTopFromMethodBytecode, space.w_one) ]: shadow = w_class.as_class_get_shadow(space) w_method = model.W_CompiledMethod(space, 2) w_method.bytes = 
pushConstantOneBytecode + bytecode @@ -457,7 +457,7 @@ method.setliterals(literals) shadow.installmethod(literals[0], method) w_object = shadow.new() - w_frame, s_frame = new_frame(sendLiteralSelectorBytecode(16) + returnTopFromMethod) + w_frame, s_frame = new_frame(sendLiteralSelectorBytecode(16) + returnTopFromMethodBytecode) s_frame.w_method().setliterals(literals) s_frame.push(w_object) s_frame.push(space.wrap_int(8)) @@ -494,8 +494,8 @@ assert point.x() == 0 assert point.y() == 1 -def test_longJumpIfTrue(): - w_frame, s_frame = new_frame(longJumpIfTrue(0) + chr(15) + longJumpIfTrue(0) + chr(15)) +def test_longJumpIfTrueBytecode(): + w_frame, s_frame = new_frame(longJumpIfTrueBytecode(0) + chr(15) + longJumpIfTrueBytecode(0) + chr(15)) s_frame.push(space.w_false) pc = s_frame.pc() + 2 step_in_interp(s_frame) @@ -505,9 +505,9 @@ step_in_interp(s_frame) assert s_frame.pc() == pc + 15 -def test_longJumpIfFalse(): - w_frame, s_frame = new_frame(pushConstantTrueBytecode + longJumpIfFalse(0) + chr(15) + - pushConstantFalseBytecode + longJumpIfFalse(0) + chr(15)) +def test_longJumpIfFalseBytecode(): + w_frame, s_frame = new_frame(pushConstantTrueBytecode + longJumpIfFalseBytecode(0) + chr(15) + + pushConstantFalseBytecode + longJumpIfFalseBytecode(0) + chr(15)) step_in_interp(s_frame) pc = s_frame.pc() + 2 step_in_interp(s_frame) @@ -517,21 +517,21 @@ step_in_interp(s_frame) assert s_frame.pc() == pc + 15 -def test_longUnconditionalJump(): - w_frame, s_frame = new_frame(longUnconditionalJump(4) + chr(15)) +def test_longUnconditionalJumpBytecode(): + w_frame, s_frame = new_frame(longUnconditionalJumpBytecode(4) + chr(15)) pc = s_frame.pc() + 2 step_in_interp(s_frame) assert s_frame.pc() == pc + 15 -def test_shortUnconditionalJump(): +def test_shortUnconditionalJumpBytecode(): w_frame, s_frame = new_frame(chr(145)) pc = s_frame.pc() + 1 step_in_interp(s_frame) assert s_frame.pc() == pc + 2 -def test_shortConditionalJump(): - w_frame, s_frame = 
new_frame(pushConstantTrueBytecode + shortConditionalJump(3) + - pushConstantFalseBytecode + shortConditionalJump(3)) +def test_shortConditionalJumpBytecode(): + w_frame, s_frame = new_frame(pushConstantTrueBytecode + shortConditionalJumpBytecode(3) + + pushConstantFalseBytecode + shortConditionalJumpBytecode(3)) step_in_interp(s_frame) pc = s_frame.pc() + 1 step_in_interp(s_frame) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -111,7 +111,7 @@ w_selector = interp.perform(space.wrap_string(benchmark), "asSymbol") w_method = model.W_CompiledMethod(space, header=512) w_method.literalatput0(space, 1, w_selector) - w_method.setbytes([chr(131), chr(argcount << 5), chr(124)]) #returnTopFromMethod + w_method.setbytes([chr(131), chr(argcount << 5), chr(124)]) #returnTopFromMethodBytecodeBytecode s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) s_frame.push(w_receiver) if not stringarg == "": From noreply at buildbot.pypy.org Wed May 14 14:51:05 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed whitespace in interpreter.py :( Message-ID: <20140514125105.C0F721C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r819:bf427e482576 Date: 2014-05-14 13:15 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/bf427e482576/ Log: Fixed whitespace in interpreter.py :( diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -85,7 +85,7 @@ except ProcessSwitch, p: if self.trace: print "====== Switched process from: %s" % s_new_context.short_str() - print "====== to: %s " % p.s_new_context.short_str() + print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context def loop_bytecodes(self, s_context, may_context_switch=True): @@ -126,20 
+126,20 @@ self.loop_bytecodes(s_new_frame, may_context_switch) finally: self.remaining_stack_depth += 1 - - def step(self, context): - bytecode = context.fetch_next_bytecode() - for entry in UNROLLING_BYTECODE_RANGES: - if len(entry) == 2: - bc, methname = entry - if bytecode == bc: - return getattr(context, methname)(self, bytecode) - else: - start, stop, methname = entry - if start <= bytecode <= stop: - return getattr(context, methname)(self, bytecode) - assert 0, "unreachable" - + + def step(self, context): + bytecode = context.fetch_next_bytecode() + for entry in UNROLLING_BYTECODE_RANGES: + if len(entry) == 2: + bc, methname = entry + if bytecode == bc: + return getattr(context, methname)(self, bytecode) + else: + start, stop, methname = entry + if start <= bytecode <= stop: + return getattr(context, methname)(self, bytecode) + assert 0, "unreachable" + # ============== Methods for handling user interrupts ============== def jitted_check_for_interrupt(self, s_frame): From noreply at buildbot.pypy.org Wed May 14 14:51:06 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:06 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Small cleanups in tests. Message-ID: <20140514125106.BE0051C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r820:96b59003d67f Date: 2014-05-14 13:15 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/96b59003d67f/ Log: Small cleanups in tests. 
diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -100,8 +100,6 @@ return s_frame.w_self(), s_frame def new_frame(bytes, receiver=None): - if not receiver: - receiver = space.w_nil return _new_frame(space, bytes, receiver) def test_create_frame(): @@ -219,7 +217,6 @@ step_in_interp(s_frame) assert s_frame.stack() == [] for test_index in range(8): - print w_frame.fetch_all(s_frame.space) if test_index == index: assert s_frame.gettemp(test_index).is_same_object(space.w_true) else: @@ -712,7 +709,7 @@ # ^ [ 3 + 4 ] value assert interpret_bc( [ 137, 117, 200, 164, 4, 32, 33, 176, 125, 201, 124], - fakeliterals(space, space.wrap_int(3), space.wrap_int(4))).value == 7 + fakeliterals(space, 3, 4)).value == 7 def test_bc_x_plus_x_plus_1(): @@ -724,7 +721,7 @@ assert interpret_bc( [ 137, 118, 200, 164, 7, 104, 16, 16, 176, 118, 176, 125, 32, 202, 124 ], - fakeliterals(space, space.wrap_int(3))).value == 7 + fakeliterals(space, 3)).value == 7 def test_bc_x_plus_y(): # value2 @@ -737,7 +734,7 @@ assert interpret_bc( [ 137, 119, 200, 164, 6, 105, 104, 16, 17, 176, 125, 33, 34, 240, 124 ], - fakeliterals(space, "value:value:", space.wrap_int(3), space.wrap_int(4))).value == 7 + fakeliterals(space, "value:value:", 3, 4)).value == 7 run_with_faked_primitive_methods( [[space.w_BlockContext, primitives.VALUE, 2, "value:value:"]], @@ -751,7 +748,7 @@ # ^ [ self ] value assert interpret_bc( [ 137, 117, 200, 164, 2, 112, 125, 201, 124 ], - fakeliterals(space, space.wrap_int(3))).is_nil(space) + fakeliterals(space, 3)).is_nil(space) def test_bc_value_return(): # valueReturn From noreply at buildbot.pypy.org Wed May 14 14:51:07 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Added common superclass for ProcessSwitch and StackOverflow. 
Message-ID: <20140514125107.BA4F81C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r821:6d70e836aec8 Date: 2014-05-14 13:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6d70e836aec8/ Log: Added common superclass for ProcessSwitch and StackOverflow. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -72,7 +72,7 @@ self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: - s_new_context = e.s_context + s_new_context = e.s_new_context except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: @@ -221,22 +221,27 @@ def __init__(self, object): self.object = object -class StackOverflow(Exception): - _attrs_ = ["s_context"] - def __init__(self, s_top_context): - self.s_context = s_top_context - class Return(Exception): _attrs_ = ["value", "s_target_context"] - def __init__(self, object, s_context): - self.value = object - self.s_target_context = s_context + def __init__(self, s_target_context, w_result): + self.value = w_result + self.s_target_context = s_target_context -class ProcessSwitch(Exception): +class ContextSwitchException(Exception): + """General Exception that causes the interpreter to leave + the current context. The current pc is required in order to update + the context object that we are leaving.""" _attrs_ = ["s_new_context"] - def __init__(self, s_context): - self.s_new_context = s_context + def __init__(self, s_new_context): + self.s_new_context = s_new_context +class StackOverflow(ContextSwitchException): + """This causes the current jit-loop to be left. 
+ This is an experimental mechanism to avoid stack-overflow errors + on OS level, and we suspect it breaks jit performance at least sometimes.""" + +class ProcessSwitch(ContextSwitchException): + """This causes the interpreter to switch the executed context.""" def make_call_primitive_bytecode(primitive, selector, argcount): def callPrimitive(self, interp, current_bytecode): @@ -443,7 +448,7 @@ if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - raise Return(return_value, s_return_to) + raise Return(s_return_to, return_value) def activate_unwind_context(self, interp): # the first temp is executed flag for both #ensure: and #ifCurtailed: diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1008,7 +1008,7 @@ interp._loop = True interp.loop_bytecodes(w_method.create_frame(space, space.wrap_int(0), [])) except interpreter.StackOverflow, e: - assert isinstance(e.s_context, shadow.MethodContextShadow) + assert isinstance(e.s_new_context, shadow.MethodContextShadow) except interpreter.ReturnFromTopLevel, e: assert False From noreply at buildbot.pypy.org Wed May 14 14:51:08 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Added decorator for bytecode implementation methods. Message-ID: <20140514125108.B2F691C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r822:66db7f3c7f67 Date: 2014-05-14 13:57 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/66db7f3c7f67/ Log: Added decorator for bytecode implementation methods. Decorator fetches additional bytes automatically, passes them into the implementation method as parameter (similar to unwrap_specs in primitives.py). 
Added factory-methods for bytecode implementations (make_send_selector_bytecode and make_quick_call_primitive_bytecode). Prepended underscore to some helper-methods. Added descriptive function-names to decorator functions. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -62,7 +62,7 @@ self.trace_proxy = False def loop(self, w_active_context): - # just a trampoline for the actual loop implemented in loop_bytecodes + # This is the top-level loop and is not invoked recursively. s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.remaining_stack_depth == self.max_stack_depth @@ -243,7 +243,28 @@ class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context.""" -def make_call_primitive_bytecode(primitive, selector, argcount): +# This is a decorator for bytecode implementation methods. +# parameter_bytes=N means N additional bytes are fetched as parameters. +def bytecode_implementation(parameter_bytes=0): + def bytecode_implementation_decorator(actual_implementation_method): + from rpython.rlib.unroll import unrolling_zero + @jit.unroll_safe + def bytecode_implementation_wrapper(self, interp, current_bytecode): + parameters = () + i = unrolling_zero + while i < parameter_bytes: + parameters += (self.fetch_next_bytecode(), ) + i = i + 1 + # This is a good place to step through bytecodes. 
+ # import pdb; pdb.set_trace() + return actual_implementation_method(self, interp, current_bytecode, *parameters) + bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name + return bytecode_implementation_wrapper + return bytecode_implementation_decorator + +def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): + func = primitives.prim_table[primitive] + @bytecode_implementation() def callPrimitive(self, interp, current_bytecode): # WARNING: this is used for bytecodes for which it is safe to # directly call the primitive. In general, it is not safe: for @@ -252,8 +273,6 @@ # else that the user put in a class in an 'at:' method. # The rule of thumb is that primitives with only int and float # in their unwrap_spec are safe. - # XXX move next line out of callPrimitive? - func = primitives.prim_table[primitive] try: return func(interp, self, argcount) except primitives.PrimitiveFailedError: @@ -262,7 +281,8 @@ return callPrimitive def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): - def callPrimitive(self, interp, current_bytecode): + @bytecode_implementation() + def callClassbasedPrimitive(self, interp, current_bytecode): rcvr = self.peek(argcount) receiver_class = rcvr.getclass(self.space) try: @@ -275,7 +295,25 @@ except primitives.PrimitiveFailedError: pass return self._sendSelfSelectorSpecial(selector, argcount, interp) - return callPrimitive + callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector + return callClassbasedPrimitive + +# Some selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. 
+def make_quick_call_primitive_bytecode(primitive_index, argcount): + func = primitives.prim_table[primitive_index] + @bytecode_implementation() + def quick_call_primitive_bytecode(self, interp, current_bytecode): + return func(interp, self, argcount) + return quick_call_primitive_bytecode + +# This is for bytecodes that actually implement a simple message-send. +# We do not optimize anything for these cases. +def make_send_selector_bytecode(selector, argcount): + @bytecode_implementation() + def selector_bytecode(self, interp, current_bytecode): + return self._sendSelfSelectorSpecial(selector, argcount, interp) + selector_bytecode.func_name = "selector_bytecode_%s" % selector + return selector_bytecode # ___________________________________________________________________________ # Bytecode Implementations: @@ -284,19 +322,25 @@ # __extend__ adds new methods to the ContextPartShadow class class __extend__(ContextPartShadow): - # push bytecodes + + # ====== Push/Pop bytecodes ====== + + @bytecode_implementation() def pushReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 self.push(self.w_receiver().fetch(self.space, index)) + @bytecode_implementation() def pushTemporaryVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 self.push(self.gettemp(index)) + @bytecode_implementation() def pushLiteralConstantBytecode(self, interp, current_bytecode): index = current_bytecode & 31 self.push(self.w_method().getliteral(index)) + @bytecode_implementation() def pushLiteralVariableBytecode(self, interp, current_bytecode): # this bytecode assumes that literals[index] is an Association # which is an object with two named vars, and fetches the second @@ -306,46 +350,59 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) + @bytecode_implementation() def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 7 
self.w_receiver().store(self.space, index, self.pop()) + @bytecode_implementation() def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 7 self.settemp(index, self.pop()) - # push bytecodes + @bytecode_implementation() def pushReceiverBytecode(self, interp, current_bytecode): self.push(self.w_receiver()) + @bytecode_implementation() def pushConstantTrueBytecode(self, interp, current_bytecode): self.push(interp.space.w_true) + @bytecode_implementation() def pushConstantFalseBytecode(self, interp, current_bytecode): self.push(interp.space.w_false) + @bytecode_implementation() def pushConstantNilBytecode(self, interp, current_bytecode): self.push(interp.space.w_nil) + @bytecode_implementation() def pushConstantMinusOneBytecode(self, interp, current_bytecode): self.push(interp.space.w_minus_one) + @bytecode_implementation() def pushConstantZeroBytecode(self, interp, current_bytecode): self.push(interp.space.w_zero) + @bytecode_implementation() def pushConstantOneBytecode(self, interp, current_bytecode): self.push(interp.space.w_one) + @bytecode_implementation() def pushConstantTwoBytecode(self, interp, current_bytecode): self.push(interp.space.w_two) + @bytecode_implementation() def pushActiveContextBytecode(self, interp, current_bytecode): self.push(self.w_self()) + @bytecode_implementation() def duplicateTopBytecode(self, interp, current_bytecode): self.push(self.top()) - # send, return bytecodes + # ====== Send/Return bytecodes ====== + + @bytecode_implementation() def sendLiteralSelectorBytecode(self, interp, current_bytecode): w_selector = self.w_method().getliteral(current_bytecode & 15) argcount = ((current_bytecode >> 4) & 3) - 1 @@ -464,34 +521,40 @@ self.mark_returned() raise nlr + @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTrueBytecode(self, interp, 
current_bytecode): return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTopFromBlockBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_sender()) + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise MissingBytecode("unknownBytecode") - def extendedVariableTypeAndIndex(self): - # AK please explain this method (a helper, I guess) - descriptor = self.fetch_next_bytecode() + def _extendedVariableTypeAndIndex(self, descriptor): return ((descriptor >> 6) & 3), (descriptor & 63) - def extendedPushBytecode(self, interp, current_bytecode): - variableType, variableIndex = self.extendedVariableTypeAndIndex() + @bytecode_implementation(parameter_bytes=1) + def extendedPushBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) if variableType == 0: self.push(self.w_receiver().fetch(self.space, variableIndex)) elif variableType == 1: @@ -505,8 +568,8 @@ else: assert 0 - def extendedStoreBytecode(self, interp, current_bytecode): - variableType, variableIndex = self.extendedVariableTypeAndIndex() + def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) if variableType == 0: self.w_receiver().store(self.space, variableIndex, self.top()) elif variableType == 1: @@ -518,23 +581,27 @@ association = 
wrapper.AssociationWrapper(self.space, w_association) association.store_value(self.top()) - def extendedStoreAndPopBytecode(self, interp, current_bytecode): - self.extendedStoreBytecode(interp, current_bytecode) + @bytecode_implementation(parameter_bytes=1) + def extendedStoreBytecode(self, interp, current_bytecode, descriptor): + return self._extendedStoreBytecode(interp, current_bytecode, descriptor) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): + self._extendedStoreBytecode(interp, current_bytecode, descriptor) self.pop() - def getExtendedSelectorArgcount(self): - descriptor = self.fetch_next_bytecode() + def _getExtendedSelectorArgcount(self, descriptor): return ((self.w_method().getliteral(descriptor & 31)), (descriptor >> 5)) - def singleExtendedSendBytecode(self, interp, current_bytecode): - w_selector, argcount = self.getExtendedSelectorArgcount() + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) return self._sendSelfSelector(w_selector, argcount, interp) - def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode): + @bytecode_implementation(parameter_bytes=2) + def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): from spyvm import error - second = self.fetch_next_bytecode() - third = self.fetch_next_bytecode() opType = second >> 5 if opType == 0: # selfsend @@ -570,22 +637,24 @@ association = wrapper.AssociationWrapper(self.space, w_association) association.store_value(self.top()) - def singleExtendedSuperBytecode(self, interp, current_bytecode): - w_selector, argcount = self.getExtendedSelectorArgcount() + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = 
self._getExtendedSelectorArgcount(descriptor) return self._sendSuperSelector(w_selector, argcount, interp) - def secondExtendedSendBytecode(self, interp, current_bytecode): - descriptor = self.fetch_next_bytecode() + @bytecode_implementation(parameter_bytes=1) + def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): w_selector = self.w_method().getliteral(descriptor & 63) argcount = descriptor >> 6 return self._sendSelfSelector(w_selector, argcount, interp) + @bytecode_implementation() def popStackBytecode(self, interp, current_bytecode): self.pop() - - # closure bytecodes - def pushNewArrayBytecode(self, interp, current_bytecode): - arraySize, popIntoArray = splitter[7, 1](self.fetch_next_bytecode()) + + @bytecode_implementation(parameter_bytes=1) + def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): + arraySize, popIntoArray = splitter[7, 1](descriptor) newArray = None if popIntoArray == 1: newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) @@ -593,28 +662,31 @@ newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) self.push(newArray) + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") - def _extract_index_and_temps(self): - index_in_array = self.fetch_next_bytecode() - index_of_array = self.fetch_next_bytecode() + def _extract_index_and_temps(self, index_in_array, index_of_array): w_indirectTemps = self.gettemp(index_of_array) return index_in_array, w_indirectTemps - - def pushRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() + + @bytecode_implementation(parameter_bytes=2) + def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) self.push(w_indirectTemps.at0(self.space, index_in_array)) - def 
storeRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() + @bytecode_implementation(parameter_bytes=2) + def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) w_indirectTemps.atput0(self.space, index_in_array, self.top()) - def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() + @bytecode_implementation(parameter_bytes=2) + def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode): + @bytecode_implementation(parameter_bytes=3) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize "Simulate the action of a 'closure copy' bytecode whose result is the @@ -633,23 +705,22 @@ startpc: pc numArgs: numArgs copiedValues: copiedValues). 
- self jump: blockSize + self _jump: blockSize """ + space = self.space - numArgs, numCopied = splitter[4, 4](self.fetch_next_bytecode()) - j = self.fetch_next_bytecode() - i = self.fetch_next_bytecode() + numArgs, numCopied = splitter[4, 4](descriptor) blockSize = (j << 8) | i #create new instance of BlockClosure w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, self.pop_and_return_n(numCopied)) self.push(w_closure) - self.jump(blockSize) + self._jump(blockSize) - def jump(self, offset): + def _jump(self, offset): self.store_pc(self.pc() + offset) - def jumpConditional(self, interp, expecting_true, position): + def _jumpConditional(self, interp, expecting_true, position): if expecting_true: w_expected = interp.space.w_true w_alternative = interp.space.w_false @@ -660,31 +731,36 @@ # Don't check the class, just compare with only two instances. w_bool = self.pop() if w_expected.is_same_object(w_bool): - self.jump(position) + self._jump(position) elif not w_alternative.is_same_object(w_bool): self._mustBeBoolean(interp, w_bool) - def shortJumpPosition(self, current_bytecode): + def _shortJumpPosition(self, current_bytecode): return (current_bytecode & 7) + 1 + def _longJumpPosition(self, current_bytecode, parameter): + return ((current_bytecode & 3) << 8) + parameter + + @bytecode_implementation() def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self.jump(self.shortJumpPosition(current_bytecode)) + self._jump(self._shortJumpPosition(current_bytecode)) + @bytecode_implementation() def shortConditionalJumpBytecode(self, interp, current_bytecode): - # The conditional jump is "jump on false" - self.jumpConditional(interp, False, self.shortJumpPosition(current_bytecode)) + # The conditional _jump is "_jump on false" + self._jumpConditional(interp, False, self._shortJumpPosition(current_bytecode)) - def longUnconditionalJumpBytecode(self, interp, current_bytecode): - self.jump((((current_bytecode & 7) - 4) << 8) + 
self.fetch_next_bytecode()) + @bytecode_implementation(parameter_bytes=1) + def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): + self._jump((((current_bytecode & 7) - 4) << 8) + parameter) - def longJumpPosition(self, current_bytecode): - return ((current_bytecode & 3) << 8) + self.fetch_next_bytecode() + @bytecode_implementation(parameter_bytes=1) + def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, True, self._longJumpPosition(current_bytecode, parameter)) - def longJumpIfTrueBytecode(self, interp, current_bytecode): - self.jumpConditional(interp, True, self.longJumpPosition(current_bytecode)) - - def longJumpIfFalseBytecode(self, interp, current_bytecode): - self.jumpConditional(interp, False, self.longJumpPosition(current_bytecode)) + @bytecode_implementation(parameter_bytes=1) + def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, False, self._longJumpPosition(current_bytecode, parameter)) bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) @@ -708,56 +784,25 @@ w_selector = self.space.get_special_selector(selector) return self._sendSelfSelector(w_selector, numargs, interp) - def bytecodePrimAt(self, interp, current_bytecode): - # n.b.: depending on the type of the receiver, this may invoke - # primitives.AT, primitives.STRING_AT, or something else for all - # I know. 
- return self._sendSelfSelectorSpecial("at:", 1, interp) + bytecodePrimAt = make_send_selector_bytecode("at:", 1) + bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) + bytecodePrimSize = make_send_selector_bytecode("size", 0) + bytecodePrimNext = make_send_selector_bytecode("next", 0) + bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) + bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) - def bytecodePrimAtPut(self, interp, current_bytecode): - # n.b. as above - return self._sendSelfSelectorSpecial("at:put:", 2, interp) - - def bytecodePrimSize(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("size", 0, interp) - - def bytecodePrimNext(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("next", 0, interp) - - def bytecodePrimNextPut(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("nextPut:", 1, interp) - - def bytecodePrimAtEnd(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("atEnd", 0, interp) - - def bytecodePrimEquivalent(self, interp, current_bytecode): - # short-circuit: classes cannot override the '==' method, - # which cannot fail - primitives.prim_table[primitives.EQUIVALENT](interp, self, 1) - - def bytecodePrimClass(self, interp, current_bytecode): - # short-circuit: classes cannot override the 'class' method, - # which cannot fail - primitives.prim_table[primitives.CLASS](interp, self, 0) + bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) + bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", 
primitives.CLOSURE_VALUE_, "value:", 1) - def bytecodePrimDo(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("do:", 1, interp) - - def bytecodePrimNew(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("new", 0, interp) - - def bytecodePrimNewWithArg(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("new:", 1, interp) - - def bytecodePrimPointX(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("x", 0, interp) - - def bytecodePrimPointY(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("y", 0, interp) + bytecodePrimDo = make_send_selector_bytecode("do:", 1) + bytecodePrimNew = make_send_selector_bytecode("new", 0) + bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) + bytecodePrimPointX = make_send_selector_bytecode("x", 0) + bytecodePrimPointY = make_send_selector_bytecode("y", 0) BYTECODE_RANGES = [ ( 0, 15, "pushReceiverVariableBytecode"), diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1016,8 +1016,8 @@ def stack_frame(self, w_frame, may_interrupt=True): stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): - assert sys._getframe(4 + i * 6).f_code.co_name == 'loop_bytecodes' - assert sys._getframe(5 + stack_depth * 6).f_code.co_name == 'loop' + assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' + assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' return interpreter.Interpreter.stack_frame(self, w_frame) def test_actual_stackdepth(): From noreply at buildbot.pypy.org Wed May 14 14:51:09 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:09 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Moved around some bytecode implementations to group them in a consisteny way. 
Message-ID: <20140514125109.B86CF1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r823:e225c9de09c5 Date: 2014-05-14 14:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e225c9de09c5/ Log: Moved around some bytecode implementations to group them in a consisteny way. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -400,13 +400,115 @@ def duplicateTopBytecode(self, interp, current_bytecode): self.push(self.top()) - # ====== Send/Return bytecodes ====== + @bytecode_implementation() + def popStackBytecode(self, interp, current_bytecode): + self.pop() - @bytecode_implementation() - def sendLiteralSelectorBytecode(self, interp, current_bytecode): - w_selector = self.w_method().getliteral(current_bytecode & 15) - argcount = ((current_bytecode >> 4) & 3) - 1 - return self._sendSelfSelector(w_selector, argcount, interp) + @bytecode_implementation(parameter_bytes=1) + def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): + arraySize, popIntoArray = splitter[7, 1](descriptor) + newArray = None + if popIntoArray == 1: + newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) + else: + newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) + self.push(newArray) + + # ====== Extended Push/Pop bytecodes ====== + + def _extendedVariableTypeAndIndex(self, descriptor): + return ((descriptor >> 6) & 3), (descriptor & 63) + + @bytecode_implementation(parameter_bytes=1) + def extendedPushBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.push(self.w_receiver().fetch(self.space, variableIndex)) + elif variableType == 1: + self.push(self.gettemp(variableIndex)) + elif variableType == 2: + self.push(self.w_method().getliteral(variableIndex)) + elif variableType == 3: + w_association = 
self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + else: + assert 0 + + def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.w_receiver().store(self.space, variableIndex, self.top()) + elif variableType == 1: + self.settemp(variableIndex, self.top()) + elif variableType == 2: + raise IllegalStoreError + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreBytecode(self, interp, current_bytecode, descriptor): + return self._extendedStoreBytecode(interp, current_bytecode, descriptor) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): + self._extendedStoreBytecode(interp, current_bytecode, descriptor) + self.pop() + + def _extract_index_and_temps(self, index_in_array, index_of_array): + w_indirectTemps = self.gettemp(index_of_array) + return index_in_array, w_indirectTemps + + @bytecode_implementation(parameter_bytes=2) + def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + self.push(w_indirectTemps.at0(self.space, index_in_array)) + + @bytecode_implementation(parameter_bytes=2) + def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.top()) + + @bytecode_implementation(parameter_bytes=2) + def storeAndPopRemoteTempLongBytecode(self, 
interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.pop()) + + @bytecode_implementation(parameter_bytes=3) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): + """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ + ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize + "Simulate the action of a 'closure copy' bytecode whose result is the + new BlockClosure for the following code" + | copiedValues | + numCopied > 0 + ifTrue: + [copiedValues := Array new: numCopied. + numCopied to: 1 by: -1 do: + [:i| + copiedValues at: i put: self pop]] + ifFalse: + [copiedValues := nil]. + self push: (BlockClosure new + outerContext: self + startpc: pc + numArgs: numArgs + copiedValues: copiedValues). + self jump: blockSize + """ + + space = self.space + numArgs, numCopied = splitter[4, 4](descriptor) + blockSize = (j << 8) | i + # Create new instance of BlockClosure + w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, + self.pop_and_return_n(numCopied)) + self.push(w_closure) + self._jump(blockSize) + + # ====== Helpers for send/return bytecodes ====== def _sendSelfSelector(self, w_selector, argcount, interp): receiver = self.peek(argcount) @@ -444,6 +546,11 @@ return interp.stack_frame(s_frame) + @objectmodel.specialize.arg(1) + def _sendSelfSelectorSpecial(self, selector, numargs, interp): + w_selector = self.space.get_special_selector(selector) + return self._sendSelfSelector(w_selector, numargs, interp) + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) @@ -507,19 +614,7 @@ print '%s<- %s' % (interp.padding(), 
return_value.as_repr_string()) raise Return(s_return_to, return_value) - def activate_unwind_context(self, interp): - # the first temp is executed flag for both #ensure: and #ifCurtailed: - if self.gettemp(1).is_nil(self.space): - self.settemp(1, self.space.w_true) # mark unwound - self.push(self.gettemp(0)) # push the first argument - try: - self.bytecodePrimValue(interp, 0) - except Return, nlr: - if self is nlr.s_target_context: - return - else: - self.mark_returned() - raise nlr + # ====== Send/Return bytecodes ====== @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): @@ -546,49 +641,10 @@ return self._return(self.pop(), interp, self.s_sender()) @bytecode_implementation() - def unknownBytecode(self, interp, current_bytecode): - raise MissingBytecode("unknownBytecode") - - def _extendedVariableTypeAndIndex(self, descriptor): - return ((descriptor >> 6) & 3), (descriptor & 63) - - @bytecode_implementation(parameter_bytes=1) - def extendedPushBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.push(self.w_receiver().fetch(self.space, variableIndex)) - elif variableType == 1: - self.push(self.gettemp(variableIndex)) - elif variableType == 2: - self.push(self.w_method().getliteral(variableIndex)) - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - else: - assert 0 - - def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.w_receiver().store(self.space, variableIndex, self.top()) - elif variableType == 1: - self.settemp(variableIndex, self.top()) - elif variableType == 2: - raise IllegalStoreError - elif variableType == 3: - w_association = 
self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreBytecode(self, interp, current_bytecode, descriptor): - return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): - self._extendedStoreBytecode(interp, current_bytecode, descriptor) - self.pop() + def sendLiteralSelectorBytecode(self, interp, current_bytecode): + w_selector = self.w_method().getliteral(current_bytecode & 15) + argcount = ((current_bytecode >> 4) & 3) - 1 + return self._sendSelfSelector(w_selector, argcount, interp) def _getExtendedSelectorArgcount(self, descriptor): return ((self.w_method().getliteral(descriptor & 31)), @@ -648,74 +704,31 @@ argcount = descriptor >> 6 return self._sendSelfSelector(w_selector, argcount, interp) + # ====== Misc ====== + + def activate_unwind_context(self, interp): + # the first temp is executed flag for both #ensure: and #ifCurtailed: + if self.gettemp(1).is_nil(self.space): + self.settemp(1, self.space.w_true) # mark unwound + self.push(self.gettemp(0)) # push the first argument + try: + self.bytecodePrimValue(interp, 0) + except Return, nlr: + if self is nlr.s_target_context: + return + else: + self.mark_returned() + raise nlr + @bytecode_implementation() - def popStackBytecode(self, interp, current_bytecode): - self.pop() - - @bytecode_implementation(parameter_bytes=1) - def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): - arraySize, popIntoArray = splitter[7, 1](descriptor) - newArray = None - if popIntoArray == 1: - newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) - else: - newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) - self.push(newArray) - + def unknownBytecode(self, interp, 
current_bytecode): + raise MissingBytecode("unknownBytecode") + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") - def _extract_index_and_temps(self, index_in_array, index_of_array): - w_indirectTemps = self.gettemp(index_of_array) - return index_in_array, w_indirectTemps - - @bytecode_implementation(parameter_bytes=2) - def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - self.push(w_indirectTemps.at0(self.space, index_in_array)) - - @bytecode_implementation(parameter_bytes=2) - def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.top()) - - @bytecode_implementation(parameter_bytes=2) - def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - - @bytecode_implementation(parameter_bytes=3) - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): - """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ - ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize - "Simulate the action of a 'closure copy' bytecode whose result is the - new BlockClosure for the following code" - | copiedValues | - numCopied > 0 - ifTrue: - [copiedValues := Array new: numCopied. - numCopied to: 1 by: -1 do: - [:i| - copiedValues at: i put: self pop]] - ifFalse: - [copiedValues := nil]. 
- self push: (BlockClosure new - outerContext: self - startpc: pc - numArgs: numArgs - copiedValues: copiedValues). - self _jump: blockSize - """ - - space = self.space - numArgs, numCopied = splitter[4, 4](descriptor) - blockSize = (j << 8) | i - #create new instance of BlockClosure - w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, - self.pop_and_return_n(numCopied)) - self.push(w_closure) - self._jump(blockSize) + # ====== Jump bytecodes ====== def _jump(self, offset): self.store_pc(self.pc() + offset) @@ -762,6 +775,8 @@ def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): self._jumpConditional(interp, False, self._longJumpPosition(current_bytecode, parameter)) + # ====== Bytecodes implemented with primitives and message sends ====== + bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) bytecodePrimLessThan = make_call_primitive_bytecode (primitives.LESSTHAN, "<", 1) @@ -779,11 +794,6 @@ bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) - @objectmodel.specialize.arg(1) - def _sendSelfSelectorSpecial(self, selector, numargs, interp): - w_selector = self.space.get_special_selector(selector) - return self._sendSelfSelector(w_selector, numargs, interp) - bytecodePrimAt = make_send_selector_bytecode("at:", 1) bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) bytecodePrimSize = make_send_selector_bytecode("size", 0) From noreply at buildbot.pypy.org Wed May 14 14:51:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Refactored activate_unwind_context for less code duplication. 
Message-ID: <20140514125110.B172C1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r824:dd88f5e52d5b Date: 2014-05-14 14:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/dd88f5e52d5b/ Log: Refactored activate_unwind_context for less code duplication. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -77,9 +77,7 @@ s_new_context = s_sender while s_new_context is not nlr.s_target_context: s_sender = s_new_context.s_sender() - if not s_new_context.is_closure_context() and s_new_context.w_method().primitive() == 198: - s_new_context.activate_unwind_context(self) - s_new_context.mark_returned() + s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: @@ -109,9 +107,7 @@ self.step(s_context) except Return, nlr: if nlr.s_target_context is not s_context: - if not s_context.is_closure_context() and method.primitive() == 198: - s_context.activate_unwind_context(self) - s_context.mark_returned() + s_context._activate_unwind_context(self) raise nlr else: s_context.push(nlr.value) @@ -604,14 +600,16 @@ raise e def _return(self, return_value, interp, s_return_to): - # for tests, when returning from the top-level context - if s_return_to is None: - raise ReturnFromTopLevel(return_value) - # unfortunately, the assert below is not true for some tests + # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + + # ################################################################## if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) + + if s_return_to is None: + # This should never happen while executing a normal image. 
+ raise ReturnFromTopLevel(return_value) raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @@ -705,21 +703,25 @@ return self._sendSelfSelector(w_selector, argcount, interp) # ====== Misc ====== - - def activate_unwind_context(self, interp): - # the first temp is executed flag for both #ensure: and #ifCurtailed: + + def _activate_unwind_context(self, interp): + # TODO put the constant somewhere else. + # Primitive 198 is used in BlockClosure >> ensure: + if self.is_closure_context() or self.w_method().primitive() != 198: + self.mark_returned() + return + # The first temp is executed flag for both #ensure: and #ifCurtailed: if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument try: self.bytecodePrimValue(interp, 0) except Return, nlr: - if self is nlr.s_target_context: - return - else: - self.mark_returned() + if self is not nlr.s_target_context: raise nlr - + finally: + self.mark_returned() + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise MissingBytecode("unknownBytecode") @@ -741,39 +743,40 @@ w_alternative = interp.space.w_true w_expected = interp.space.w_false - # Don't check the class, just compare with only two instances. + # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): self._jump(position) elif not w_alternative.is_same_object(w_bool): self._mustBeBoolean(interp, w_bool) - def _shortJumpPosition(self, current_bytecode): + def _shortJumpOffset(self, current_bytecode): return (current_bytecode & 7) + 1 - def _longJumpPosition(self, current_bytecode, parameter): + def _longJumpOffset(self, current_bytecode, parameter): return ((current_bytecode & 3) << 8) + parameter @bytecode_implementation() def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self._jump(self._shortJumpPosition(current_bytecode)) + self._jump(self._shortJumpOffset(current_bytecode)) @bytecode_implementation() def shortConditionalJumpBytecode(self, interp, current_bytecode): - # The conditional _jump is "_jump on false" - self._jumpConditional(interp, False, self._shortJumpPosition(current_bytecode)) + # The conditional jump is "jump on false" + self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) @bytecode_implementation(parameter_bytes=1) def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): - self._jump((((current_bytecode & 7) - 4) << 8) + parameter) + offset = (((current_bytecode & 7) - 4) << 8) + parameter + self._jump(offset) @bytecode_implementation(parameter_bytes=1) def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, True, self._longJumpPosition(current_bytecode, parameter)) + self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) @bytecode_implementation(parameter_bytes=1) def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, False, self._longJumpPosition(current_bytecode, parameter)) + self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) # ====== Bytecodes implemented with primitives and message sends ====== From noreply at buildbot.pypy.org Wed May 14 14:51:11 2014 
From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Added one more descriptive function name for decorated function. Message-ID: <20140514125111.AD89C1C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r825:23bb793ccadf Date: 2014-05-14 14:31 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/23bb793ccadf/ Log: Added one more descriptive function name for decorated function. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -274,6 +274,7 @@ except primitives.PrimitiveFailedError: pass return self._sendSelfSelectorSpecial(selector, argcount, interp) + callPrimitive.func_name = "callPrimitive_%s" % func.func_name return callPrimitive def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): From noreply at buildbot.pypy.org Wed May 14 14:51:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged branch storage-refactoring-virtual-pc. Message-ID: <20140514125112.AC3F21C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r826:b79f58f23cbc Date: 2014-05-14 14:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b79f58f23cbc/ Log: Merged branch storage-refactoring-virtual-pc. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -62,7 +62,7 @@ self.trace_proxy = False def loop(self, w_active_context): - # just a trampoline for the actual loop implemented in loop_bytecodes + # This is the top-level loop and is not invoked recursively. 
s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.remaining_stack_depth == self.max_stack_depth @@ -72,14 +72,12 @@ self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: - s_new_context = e.s_context + s_new_context = e.s_new_context except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: s_sender = s_new_context.s_sender() - if not s_new_context.is_closure_context() and s_new_context.w_method().primitive() == 198: - s_new_context.activate_unwind_context(self) - s_new_context.mark_returned() + s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: @@ -109,9 +107,7 @@ self.step(s_context) except Return, nlr: if nlr.s_target_context is not s_context: - if not s_context.is_closure_context() and method.primitive() == 198: - s_context.activate_unwind_context(self) - s_context.mark_returned() + s_context._activate_unwind_context(self) raise nlr else: s_context.push(nlr.value) @@ -221,24 +217,50 @@ def __init__(self, object): self.object = object -class StackOverflow(Exception): - _attrs_ = ["s_context"] - def __init__(self, s_top_context): - self.s_context = s_top_context - class Return(Exception): _attrs_ = ["value", "s_target_context"] - def __init__(self, object, s_context): - self.value = object - self.s_target_context = s_context + def __init__(self, s_target_context, w_result): + self.value = w_result + self.s_target_context = s_target_context -class ProcessSwitch(Exception): +class ContextSwitchException(Exception): + """General Exception that causes the interpreter to leave + the current context. 
The current pc is required in order to update + the context object that we are leaving.""" _attrs_ = ["s_new_context"] - def __init__(self, s_context): - self.s_new_context = s_context + def __init__(self, s_new_context): + self.s_new_context = s_new_context +class StackOverflow(ContextSwitchException): + """This causes the current jit-loop to be left. + This is an experimental mechanism to avoid stack-overflow errors + on OS level, and we suspect it breaks jit performance at least sometimes.""" -def make_call_primitive_bytecode(primitive, selector, argcount): +class ProcessSwitch(ContextSwitchException): + """This causes the interpreter to switch the executed context.""" + +# This is a decorator for bytecode implementation methods. +# parameter_bytes=N means N additional bytes are fetched as parameters. +def bytecode_implementation(parameter_bytes=0): + def bytecode_implementation_decorator(actual_implementation_method): + from rpython.rlib.unroll import unrolling_zero + @jit.unroll_safe + def bytecode_implementation_wrapper(self, interp, current_bytecode): + parameters = () + i = unrolling_zero + while i < parameter_bytes: + parameters += (self.fetch_next_bytecode(), ) + i = i + 1 + # This is a good place to step through bytecodes. + # import pdb; pdb.set_trace() + return actual_implementation_method(self, interp, current_bytecode, *parameters) + bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name + return bytecode_implementation_wrapper + return bytecode_implementation_decorator + +def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): + func = primitives.prim_table[primitive] + @bytecode_implementation() def callPrimitive(self, interp, current_bytecode): # WARNING: this is used for bytecodes for which it is safe to # directly call the primitive. In general, it is not safe: for @@ -247,17 +269,17 @@ # else that the user put in a class in an 'at:' method. 
# The rule of thumb is that primitives with only int and float # in their unwrap_spec are safe. - # XXX move next line out of callPrimitive? - func = primitives.prim_table[primitive] try: return func(interp, self, argcount) except primitives.PrimitiveFailedError: pass return self._sendSelfSelectorSpecial(selector, argcount, interp) + callPrimitive.func_name = "callPrimitive_%s" % func.func_name return callPrimitive def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): - def callPrimitive(self, interp, current_bytecode): + @bytecode_implementation() + def callClassbasedPrimitive(self, interp, current_bytecode): rcvr = self.peek(argcount) receiver_class = rcvr.getclass(self.space) try: @@ -270,7 +292,25 @@ except primitives.PrimitiveFailedError: pass return self._sendSelfSelectorSpecial(selector, argcount, interp) - return callPrimitive + callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector + return callClassbasedPrimitive + +# Some selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. +def make_quick_call_primitive_bytecode(primitive_index, argcount): + func = primitives.prim_table[primitive_index] + @bytecode_implementation() + def quick_call_primitive_bytecode(self, interp, current_bytecode): + return func(interp, self, argcount) + return quick_call_primitive_bytecode + +# This is for bytecodes that actually implement a simple message-send. +# We do not optimize anything for these cases. 
+def make_send_selector_bytecode(selector, argcount): + @bytecode_implementation() + def selector_bytecode(self, interp, current_bytecode): + return self._sendSelfSelectorSpecial(selector, argcount, interp) + selector_bytecode.func_name = "selector_bytecode_%s" % selector + return selector_bytecode # ___________________________________________________________________________ # Bytecode Implementations: @@ -279,19 +319,25 @@ # __extend__ adds new methods to the ContextPartShadow class class __extend__(ContextPartShadow): - # push bytecodes + + # ====== Push/Pop bytecodes ====== + + @bytecode_implementation() def pushReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 self.push(self.w_receiver().fetch(self.space, index)) + @bytecode_implementation() def pushTemporaryVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 self.push(self.gettemp(index)) + @bytecode_implementation() def pushLiteralConstantBytecode(self, interp, current_bytecode): index = current_bytecode & 31 self.push(self.w_method().getliteral(index)) + @bytecode_implementation() def pushLiteralVariableBytecode(self, interp, current_bytecode): # this bytecode assumes that literals[index] is an Association # which is an object with two named vars, and fetches the second @@ -301,50 +347,165 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) + @bytecode_implementation() def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 7 self.w_receiver().store(self.space, index, self.pop()) + @bytecode_implementation() def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 7 self.settemp(index, self.pop()) - # push bytecodes + @bytecode_implementation() def pushReceiverBytecode(self, interp, current_bytecode): self.push(self.w_receiver()) + @bytecode_implementation() def pushConstantTrueBytecode(self, interp, 
current_bytecode): self.push(interp.space.w_true) + @bytecode_implementation() def pushConstantFalseBytecode(self, interp, current_bytecode): self.push(interp.space.w_false) + @bytecode_implementation() def pushConstantNilBytecode(self, interp, current_bytecode): self.push(interp.space.w_nil) + @bytecode_implementation() def pushConstantMinusOneBytecode(self, interp, current_bytecode): self.push(interp.space.w_minus_one) + @bytecode_implementation() def pushConstantZeroBytecode(self, interp, current_bytecode): self.push(interp.space.w_zero) + @bytecode_implementation() def pushConstantOneBytecode(self, interp, current_bytecode): self.push(interp.space.w_one) + @bytecode_implementation() def pushConstantTwoBytecode(self, interp, current_bytecode): self.push(interp.space.w_two) + @bytecode_implementation() def pushActiveContextBytecode(self, interp, current_bytecode): self.push(self.w_self()) + @bytecode_implementation() def duplicateTopBytecode(self, interp, current_bytecode): self.push(self.top()) - # send, return bytecodes - def sendLiteralSelectorBytecode(self, interp, current_bytecode): - w_selector = self.w_method().getliteral(current_bytecode & 15) - argcount = ((current_bytecode >> 4) & 3) - 1 - return self._sendSelfSelector(w_selector, argcount, interp) + @bytecode_implementation() + def popStackBytecode(self, interp, current_bytecode): + self.pop() + + @bytecode_implementation(parameter_bytes=1) + def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): + arraySize, popIntoArray = splitter[7, 1](descriptor) + newArray = None + if popIntoArray == 1: + newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) + else: + newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) + self.push(newArray) + + # ====== Extended Push/Pop bytecodes ====== + + def _extendedVariableTypeAndIndex(self, descriptor): + return ((descriptor >> 6) & 3), (descriptor & 63) + + @bytecode_implementation(parameter_bytes=1) + def 
extendedPushBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.push(self.w_receiver().fetch(self.space, variableIndex)) + elif variableType == 1: + self.push(self.gettemp(variableIndex)) + elif variableType == 2: + self.push(self.w_method().getliteral(variableIndex)) + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + else: + assert 0 + + def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.w_receiver().store(self.space, variableIndex, self.top()) + elif variableType == 1: + self.settemp(variableIndex, self.top()) + elif variableType == 2: + raise IllegalStoreError + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreBytecode(self, interp, current_bytecode, descriptor): + return self._extendedStoreBytecode(interp, current_bytecode, descriptor) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): + self._extendedStoreBytecode(interp, current_bytecode, descriptor) + self.pop() + + def _extract_index_and_temps(self, index_in_array, index_of_array): + w_indirectTemps = self.gettemp(index_of_array) + return index_in_array, w_indirectTemps + + @bytecode_implementation(parameter_bytes=2) + def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + 
self.push(w_indirectTemps.at0(self.space, index_in_array)) + + @bytecode_implementation(parameter_bytes=2) + def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.top()) + + @bytecode_implementation(parameter_bytes=2) + def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.pop()) + + @bytecode_implementation(parameter_bytes=3) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): + """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ + ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize + "Simulate the action of a 'closure copy' bytecode whose result is the + new BlockClosure for the following code" + | copiedValues | + numCopied > 0 + ifTrue: + [copiedValues := Array new: numCopied. + numCopied to: 1 by: -1 do: + [:i| + copiedValues at: i put: self pop]] + ifFalse: + [copiedValues := nil]. + self push: (BlockClosure new + outerContext: self + startpc: pc + numArgs: numArgs + copiedValues: copiedValues). 
+ self jump: blockSize + """ + + space = self.space + numArgs, numCopied = splitter[4, 4](descriptor) + blockSize = (j << 8) | i + # Create new instance of BlockClosure + w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, + self.pop_and_return_n(numCopied)) + self.push(w_closure) + self._jump(blockSize) + + # ====== Helpers for send/return bytecodes ====== def _sendSelfSelector(self, w_selector, argcount, interp): receiver = self.peek(argcount) @@ -382,6 +543,11 @@ return interp.stack_frame(s_frame) + @objectmodel.specialize.arg(1) + def _sendSelfSelectorSpecial(self, selector, numargs, interp): + w_selector = self.space.get_special_selector(selector) + return self._sendSelfSelector(w_selector, numargs, interp) + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) @@ -435,101 +601,62 @@ raise e def _return(self, return_value, interp, s_return_to): - # for tests, when returning from the top-level context - if s_return_to is None: - raise ReturnFromTopLevel(return_value) - # unfortunately, the assert below is not true for some tests + # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + + # ################################################################## if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - raise Return(return_value, s_return_to) + + if s_return_to is None: + # This should never happen while executing a normal image. 
+ raise ReturnFromTopLevel(return_value) + raise Return(s_return_to, return_value) - def activate_unwind_context(self, interp): - # the first temp is executed flag for both #ensure: and #ifCurtailed: - if self.gettemp(1).is_nil(self.space): - self.settemp(1, self.space.w_true) # mark unwound - self.push(self.gettemp(0)) # push the first argument - try: - self.bytecodePrimValue(interp, 0) - except Return, nlr: - if self is nlr.s_target_context: - return - else: - self.mark_returned() - raise nlr + # ====== Send/Return bytecodes ====== + @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTrueBytecode(self, interp, current_bytecode): return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_home().s_sender()) + @bytecode_implementation() def returnTopFromBlockBytecode(self, interp, current_bytecode): return self._return(self.pop(), interp, self.s_sender()) - def unknownBytecode(self, interp, current_bytecode): - raise MissingBytecode("unknownBytecode") + @bytecode_implementation() + def sendLiteralSelectorBytecode(self, interp, current_bytecode): + w_selector = self.w_method().getliteral(current_bytecode & 15) + argcount = ((current_bytecode >> 4) & 3) - 1 + return self._sendSelfSelector(w_selector, argcount, interp) - def extendedVariableTypeAndIndex(self): - # AK please explain this method (a helper, I guess) - descriptor = self.fetch_next_bytecode() - 
return ((descriptor >> 6) & 3), (descriptor & 63) - - def extendedPushBytecode(self, interp, current_bytecode): - variableType, variableIndex = self.extendedVariableTypeAndIndex() - if variableType == 0: - self.push(self.w_receiver().fetch(self.space, variableIndex)) - elif variableType == 1: - self.push(self.gettemp(variableIndex)) - elif variableType == 2: - self.push(self.w_method().getliteral(variableIndex)) - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - else: - assert 0 - - def extendedStoreBytecode(self, interp, current_bytecode): - variableType, variableIndex = self.extendedVariableTypeAndIndex() - if variableType == 0: - self.w_receiver().store(self.space, variableIndex, self.top()) - elif variableType == 1: - self.settemp(variableIndex, self.top()) - elif variableType == 2: - raise IllegalStoreError - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - def extendedStoreAndPopBytecode(self, interp, current_bytecode): - self.extendedStoreBytecode(interp, current_bytecode) - self.pop() - - def getExtendedSelectorArgcount(self): - descriptor = self.fetch_next_bytecode() + def _getExtendedSelectorArgcount(self, descriptor): return ((self.w_method().getliteral(descriptor & 31)), (descriptor >> 5)) - def singleExtendedSendBytecode(self, interp, current_bytecode): - w_selector, argcount = self.getExtendedSelectorArgcount() + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) return self._sendSelfSelector(w_selector, argcount, interp) - def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode): + 
@bytecode_implementation(parameter_bytes=2) + def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): from spyvm import error - second = self.fetch_next_bytecode() - third = self.fetch_next_bytecode() opType = second >> 5 if opType == 0: # selfsend @@ -565,86 +692,51 @@ association = wrapper.AssociationWrapper(self.space, w_association) association.store_value(self.top()) - def singleExtendedSuperBytecode(self, interp, current_bytecode): - w_selector, argcount = self.getExtendedSelectorArgcount() + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) return self._sendSuperSelector(w_selector, argcount, interp) - def secondExtendedSendBytecode(self, interp, current_bytecode): - descriptor = self.fetch_next_bytecode() + @bytecode_implementation(parameter_bytes=1) + def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): w_selector = self.w_method().getliteral(descriptor & 63) argcount = descriptor >> 6 return self._sendSelfSelector(w_selector, argcount, interp) - def popStackBytecode(self, interp, current_bytecode): - self.pop() - - # closure bytecodes - def pushNewArrayBytecode(self, interp, current_bytecode): - arraySize, popIntoArray = splitter[7, 1](self.fetch_next_bytecode()) - newArray = None - if popIntoArray == 1: - newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) - else: - newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) - self.push(newArray) - + # ====== Misc ====== + + def _activate_unwind_context(self, interp): + # TODO put the constant somewhere else. 
+ # Primitive 198 is used in BlockClosure >> ensure: + if self.is_closure_context() or self.w_method().primitive() != 198: + self.mark_returned() + return + # The first temp is executed flag for both #ensure: and #ifCurtailed: + if self.gettemp(1).is_nil(self.space): + self.settemp(1, self.space.w_true) # mark unwound + self.push(self.gettemp(0)) # push the first argument + try: + self.bytecodePrimValue(interp, 0) + except Return, nlr: + if self is not nlr.s_target_context: + raise nlr + finally: + self.mark_returned() + + @bytecode_implementation() + def unknownBytecode(self, interp, current_bytecode): + raise MissingBytecode("unknownBytecode") + + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") - def _extract_index_and_temps(self): - index_in_array = self.fetch_next_bytecode() - index_of_array = self.fetch_next_bytecode() - w_indirectTemps = self.gettemp(index_of_array) - return index_in_array, w_indirectTemps + # ====== Jump bytecodes ====== - def pushRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() - self.push(w_indirectTemps.at0(self.space, index_in_array)) - - def storeRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() - w_indirectTemps.atput0(self.space, index_in_array, self.top()) - - def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode): - index_in_array, w_indirectTemps = self._extract_index_and_temps() - w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode): - """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ - ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize - "Simulate the action of a 'closure copy' bytecode whose result is the - 
new BlockClosure for the following code" - | copiedValues | - numCopied > 0 - ifTrue: - [copiedValues := Array new: numCopied. - numCopied to: 1 by: -1 do: - [:i| - copiedValues at: i put: self pop]] - ifFalse: - [copiedValues := nil]. - self push: (BlockClosure new - outerContext: self - startpc: pc - numArgs: numArgs - copiedValues: copiedValues). - self jump: blockSize - """ - space = self.space - numArgs, numCopied = splitter[4, 4](self.fetch_next_bytecode()) - j = self.fetch_next_bytecode() - i = self.fetch_next_bytecode() - blockSize = (j << 8) | i - #create new instance of BlockClosure - w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, - self.pop_and_return_n(numCopied)) - self.push(w_closure) - self.jump(blockSize) - - def jump(self, offset): + def _jump(self, offset): self.store_pc(self.pc() + offset) - def jumpConditional(self, interp, expecting_true, position): + def _jumpConditional(self, interp, expecting_true, position): if expecting_true: w_expected = interp.space.w_true w_alternative = interp.space.w_false @@ -652,34 +744,42 @@ w_alternative = interp.space.w_true w_expected = interp.space.w_false - # Don't check the class, just compare with only two instances. + # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): - self.jump(position) + self._jump(position) elif not w_alternative.is_same_object(w_bool): self._mustBeBoolean(interp, w_bool) - def shortJumpPosition(self, current_bytecode): + def _shortJumpOffset(self, current_bytecode): return (current_bytecode & 7) + 1 + def _longJumpOffset(self, current_bytecode, parameter): + return ((current_bytecode & 3) << 8) + parameter + + @bytecode_implementation() def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self.jump(self.shortJumpPosition(current_bytecode)) + self._jump(self._shortJumpOffset(current_bytecode)) + @bytecode_implementation() def shortConditionalJumpBytecode(self, interp, current_bytecode): # The conditional jump is "jump on false" - self.jumpConditional(interp, False, self.shortJumpPosition(current_bytecode)) + self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) - def longUnconditionalJumpBytecode(self, interp, current_bytecode): - self.jump((((current_bytecode & 7) - 4) << 8) + self.fetch_next_bytecode()) + @bytecode_implementation(parameter_bytes=1) + def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): + offset = (((current_bytecode & 7) - 4) << 8) + parameter + self._jump(offset) - def longJumpPosition(self, current_bytecode): - return ((current_bytecode & 3) << 8) + self.fetch_next_bytecode() + @bytecode_implementation(parameter_bytes=1) + def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) - def longJumpIfTrueBytecode(self, interp, current_bytecode): - self.jumpConditional(interp, True, self.longJumpPosition(current_bytecode)) + @bytecode_implementation(parameter_bytes=1) + def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) - def longJumpIfFalseBytecode(self, 
interp, current_bytecode): - self.jumpConditional(interp, False, self.longJumpPosition(current_bytecode)) + # ====== Bytecodes implemented with primitives and message sends ====== bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) @@ -698,61 +798,25 @@ bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) - @objectmodel.specialize.arg(1) - def _sendSelfSelectorSpecial(self, selector, numargs, interp): - w_selector = self.space.get_special_selector(selector) - return self._sendSelfSelector(w_selector, numargs, interp) + bytecodePrimAt = make_send_selector_bytecode("at:", 1) + bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) + bytecodePrimSize = make_send_selector_bytecode("size", 0) + bytecodePrimNext = make_send_selector_bytecode("next", 0) + bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) + bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) - def bytecodePrimAt(self, interp, current_bytecode): - # n.b.: depending on the type of the receiver, this may invoke - # primitives.AT, primitives.STRING_AT, or something else for all - # I know. - return self._sendSelfSelectorSpecial("at:", 1, interp) - - def bytecodePrimAtPut(self, interp, current_bytecode): - # n.b. 
as above - return self._sendSelfSelectorSpecial("at:put:", 2, interp) - - def bytecodePrimSize(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("size", 0, interp) - - def bytecodePrimNext(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("next", 0, interp) - - def bytecodePrimNextPut(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("nextPut:", 1, interp) - - def bytecodePrimAtEnd(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("atEnd", 0, interp) - - def bytecodePrimEquivalent(self, interp, current_bytecode): - # short-circuit: classes cannot override the '==' method, - # which cannot fail - primitives.prim_table[primitives.EQUIVALENT](interp, self, 1) - - def bytecodePrimClass(self, interp, current_bytecode): - # short-circuit: classes cannot override the 'class' method, - # which cannot fail - primitives.prim_table[primitives.CLASS](interp, self, 0) + bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) + bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) - def bytecodePrimDo(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("do:", 1, interp) - - def bytecodePrimNew(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("new", 0, interp) - - def bytecodePrimNewWithArg(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("new:", 1, interp) - - def bytecodePrimPointX(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("x", 0, interp) - - def 
bytecodePrimPointY(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial("y", 0, interp) + bytecodePrimDo = make_send_selector_bytecode("do:", 1) + bytecodePrimNew = make_send_selector_bytecode("new", 0) + bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) + bytecodePrimPointX = make_send_selector_bytecode("x", 0) + bytecodePrimPointY = make_send_selector_bytecode("y", 0) BYTECODE_RANGES = [ ( 0, 15, "pushReceiverVariableBytecode"), diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1008,7 +1008,7 @@ interp._loop = True interp.loop_bytecodes(w_method.create_frame(space, space.wrap_int(0), [])) except interpreter.StackOverflow, e: - assert isinstance(e.s_context, shadow.MethodContextShadow) + assert isinstance(e.s_new_context, shadow.MethodContextShadow) except interpreter.ReturnFromTopLevel, e: assert False @@ -1016,8 +1016,8 @@ def stack_frame(self, w_frame, may_interrupt=True): stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): - assert sys._getframe(4 + i * 6).f_code.co_name == 'loop_bytecodes' - assert sys._getframe(5 + stack_depth * 6).f_code.co_name == 'loop' + assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' + assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' return interpreter.Interpreter.stack_frame(self, w_frame) def test_actual_stackdepth(): From noreply at buildbot.pypy.org Wed May 14 14:51:15 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 14 May 2014 14:51:15 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Implemented refactoring to handle the pc in a virtualized way, instead of maintining it inside the context object. 
Message-ID: <20140514125115.380851C0320@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r827:1900ca207c39 Date: 2014-05-14 14:36 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1900ca207c39/ Log: Implemented refactoring to handle the pc in a virtualized way, instead of maintaining it inside the context object. See changes in Interpreter.loop_bytecodes() for the main impact of this refactoring. Everything else is just to make that work. bytecode implementations get the pc and return the modified pc. A ContextSwitchException causes the pc to be stored into the context object, so it can be resumed later. This might break Smalltalk semantics at some places, but we hope this improves virtualizability of frame objects. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -77,64 +77,59 @@ s_new_context = s_sender while s_new_context is not nlr.s_target_context: s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) + s_new_context._activate_unwind_context(self, s_new_context.pc()) s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: if self.trace: print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() + # TODO Idea: check how process switches affect performance. The tracing should not + # continue when the process is changed (it's probably rare to have the exact same interleaving + # of multiple processes). How do we make sure that a bridge is created here? 
s_new_context = p.s_new_context - def loop_bytecodes(self, s_context, may_context_switch=True): - old_pc = 0 + def loop_bytecodes(self, s_context, fresh_context=False, may_context_switch=True): if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) method = s_context.w_method() + if jit.promote(fresh_context): + pc = 0 + else: + pc = s_context.pc() while True: - pc = s_context.pc() + self.jit_driver.jit_merge_point(pc=pc, self=self, method=method, s_context=s_context) + old_pc = pc + pc = self.step(s_context, pc) if pc < old_pc: if jit.we_are_jitted(): self.jitted_check_for_interrupt(s_context) - self.jit_driver.can_enter_jit( - pc=pc, self=self, method=method, - s_context=s_context) - old_pc = pc - self.jit_driver.jit_merge_point( - pc=pc, self=self, method=method, - s_context=s_context) - try: - self.step(s_context) - except Return, nlr: - if nlr.s_target_context is not s_context: - s_context._activate_unwind_context(self) - raise nlr - else: - s_context.push(nlr.value) + self.jit_driver.can_enter_jit(pc=pc, self=self, method=method, s_context=s_context) # This is just a wrapper around loop_bytecodes that handles the remaining_stack_depth mechanism - def stack_frame(self, s_new_frame, may_context_switch=True): + def stack_frame(self, s_new_frame, may_context_switch=True, fresh_context=False): if self.remaining_stack_depth <= 1: raise StackOverflow(s_new_frame) self.remaining_stack_depth -= 1 try: - self.loop_bytecodes(s_new_frame, may_context_switch) + self.loop_bytecodes(s_new_frame, may_context_switch=may_context_switch, fresh_context=fresh_context) finally: self.remaining_stack_depth += 1 - def step(self, context): - bytecode = context.fetch_next_bytecode() + def step(self, context, pc): + bytecode = context.fetch_bytecode(pc) + pc += 1 for entry in UNROLLING_BYTECODE_RANGES: if len(entry) == 2: bc, methname = entry if bytecode == bc: - return getattr(context, methname)(self, bytecode) + return getattr(context, 
methname)(self, bytecode, pc) else: start, stop, methname = entry if start <= bytecode <= stop: - return getattr(context, methname)(self, bytecode) - assert 0, "unreachable" + return getattr(context, methname)(self, bytecode, pc) + assert False, "unreachable" # ============== Methods for handling user interrupts ============== @@ -241,27 +236,52 @@ # This is a decorator for bytecode implementation methods. # parameter_bytes=N means N additional bytes are fetched as parameters. -def bytecode_implementation(parameter_bytes=0): +# jump=True means the pc is changed in an unpredictable way. +# The implementation method must additionally handle the pc. +# needs_pc=True means the bytecode implementation required the pc, but will not change it. +def bytecode_implementation(parameter_bytes=0, jump=False, needs_pc=False): def bytecode_implementation_decorator(actual_implementation_method): from rpython.rlib.unroll import unrolling_zero @jit.unroll_safe - def bytecode_implementation_wrapper(self, interp, current_bytecode): + def bytecode_implementation_wrapper(self, interp, current_bytecode, pc): parameters = () i = unrolling_zero while i < parameter_bytes: - parameters += (self.fetch_next_bytecode(), ) + parameters += (self.fetch_bytecode(pc), ) + pc += 1 i = i + 1 + if jump or needs_pc: + parameters += (pc, ) # This is a good place to step through bytecodes. # import pdb; pdb.set_trace() - return actual_implementation_method(self, interp, current_bytecode, *parameters) + try: + jumped_pc = actual_implementation_method(self, interp, current_bytecode, *parameters) + if jump: + return jumped_pc + else: + return pc + except ContextSwitchException: + # We are returning to the loop() method, so the virtualized pc variable must be written to the context + # objects. + # This can be very bad for performance since it forces the jit to heap-allocate virtual objects. + # Bytecodes with jump=True cannot cause any Exception, so we can safely store pc (and not jumped_pc). 
+ self.store_pc(pc) + raise + except Return, ret: + if ret.s_target_context is not self: + self._activate_unwind_context(interp, pc) + raise ret + else: + self.push(ret.value) + return pc bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name return bytecode_implementation_wrapper return bytecode_implementation_decorator def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): func = primitives.prim_table[primitive] - @bytecode_implementation() - def callPrimitive(self, interp, current_bytecode): + @bytecode_implementation(needs_pc=True) + def callPrimitive(self, interp, current_bytecode, pc): # WARNING: this is used for bytecodes for which it is safe to # directly call the primitive. In general, it is not safe: for # example, depending on the type of the receiver, bytecodePrimAt @@ -270,28 +290,32 @@ # The rule of thumb is that primitives with only int and float # in their unwrap_spec are safe. try: - return func(interp, self, argcount) + if store_pc: + # The pc is stored because some primitives read the pc from the frame object. + # Only do this selectively to avoid forcing virtual frame object to the heap. 
+ self.store_pc(pc) + func(interp, self, argcount) except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) + self._sendSelfSelectorSpecial(interp, selector, argcount) callPrimitive.func_name = "callPrimitive_%s" % func.func_name return callPrimitive def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): - @bytecode_implementation() - def callClassbasedPrimitive(self, interp, current_bytecode): + @bytecode_implementation(needs_pc=True) + def callClassbasedPrimitive(self, interp, current_bytecode, pc): rcvr = self.peek(argcount) receiver_class = rcvr.getclass(self.space) try: if receiver_class is getattr(self.space, a_class_name): func = primitives.prim_table[a_primitive] - return func(interp, self, argcount) + func(interp, self, argcount) elif receiver_class is getattr(self.space, alternative_class_name): func = primitives.prim_table[alternative_primitive] - return func(interp, self, argcount) + func(interp, self, argcount) + else: + self._sendSelfSelectorSpecial(interp, selector, argcount) except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) + self._sendSelfSelectorSpecial(interp, selector, argcount) callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector return callClassbasedPrimitive @@ -300,7 +324,7 @@ func = primitives.prim_table[primitive_index] @bytecode_implementation() def quick_call_primitive_bytecode(self, interp, current_bytecode): - return func(interp, self, argcount) + func(interp, self, argcount) return quick_call_primitive_bytecode # This is for bytecodes that actually implement a simple message-send. 
@@ -308,7 +332,7 @@ def make_send_selector_bytecode(selector, argcount): @bytecode_implementation() def selector_bytecode(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial(selector, argcount, interp) + self._sendSelfSelectorSpecial(interp, selector, argcount) selector_bytecode.func_name = "selector_bytecode_%s" % selector return selector_bytecode @@ -473,8 +497,8 @@ index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - @bytecode_implementation(parameter_bytes=3) - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): + @bytecode_implementation(parameter_bytes=3, jump=True) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i, pc): """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize "Simulate the action of a 'closure copy' bytecode whose result is the @@ -500,10 +524,10 @@ numArgs, numCopied = splitter[4, 4](descriptor) blockSize = (j << 8) | i # Create new instance of BlockClosure - w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, - self.pop_and_return_n(numCopied)) + w_closure = space.newClosure(self.w_self(), pc, numArgs, self.pop_and_return_n(numCopied)) self.push(w_closure) - self._jump(blockSize) + assert blockSize >= 0 + return self._jump(blockSize, pc) # ====== Helpers for send/return bytecodes ====== @@ -543,8 +567,8 @@ return interp.stack_frame(s_frame) - @objectmodel.specialize.arg(1) - def _sendSelfSelectorSpecial(self, selector, numargs, interp): + @objectmodel.specialize.arg(2) + def _sendSelfSelectorSpecial(self, interp, selector, numargs): w_selector = self.space.get_special_selector(selector) return self._sendSelfSelector(w_selector, numargs, interp) @@ -705,7 +729,7 @@ # ====== Misc ====== 
- def _activate_unwind_context(self, interp): + def _activate_unwind_context(self, interp, current_pc): # TODO put the constant somewhere else. # Primitive 198 is used in BlockClosure >> ensure: if self.is_closure_context() or self.w_method().primitive() != 198: @@ -716,7 +740,7 @@ self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument try: - self.bytecodePrimValue(interp, 0) + self.bytecodePrimValue(interp, 0, current_pc) except Return, nlr: if self is not nlr.s_target_context: raise nlr @@ -733,10 +757,10 @@ # ====== Jump bytecodes ====== - def _jump(self, offset): - self.store_pc(self.pc() + offset) + def _jump(self, offset, pc): + return pc + offset - def _jumpConditional(self, interp, expecting_true, position): + def _jumpConditional(self, interp, expecting_true, position, pc): if expecting_true: w_expected = interp.space.w_true w_alternative = interp.space.w_false @@ -747,9 +771,10 @@ # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): - self._jump(position) + return self._jump(position, pc) elif not w_alternative.is_same_object(w_bool): self._mustBeBoolean(interp, w_bool) + return pc def _shortJumpOffset(self, current_bytecode): return (current_bytecode & 7) + 1 @@ -757,27 +782,27 @@ def _longJumpOffset(self, current_bytecode, parameter): return ((current_bytecode & 3) << 8) + parameter - @bytecode_implementation() - def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self._jump(self._shortJumpOffset(current_bytecode)) + @bytecode_implementation(jump=True) + def shortUnconditionalJumpBytecode(self, interp, current_bytecode, pc): + return self._jump(self._shortJumpOffset(current_bytecode), pc) - @bytecode_implementation() - def shortConditionalJumpBytecode(self, interp, current_bytecode): + @bytecode_implementation(jump=True) + def shortConditionalJumpBytecode(self, interp, current_bytecode, pc): # The conditional jump is "jump on false" - self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) + return self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode), pc) - @bytecode_implementation(parameter_bytes=1) - def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): + @bytecode_implementation(parameter_bytes=1, jump=True) + def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter, pc): offset = (((current_bytecode & 7) - 4) << 8) + parameter - self._jump(offset) + return self._jump(offset, pc) - @bytecode_implementation(parameter_bytes=1) - def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) + @bytecode_implementation(parameter_bytes=1, jump=True) + def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter, pc): + return self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter), pc) - 
@bytecode_implementation(parameter_bytes=1) - def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) + @bytecode_implementation(parameter_bytes=1, jump=True) + def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter, pc): + return self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter), pc) # ====== Bytecodes implemented with primitives and message sends ====== @@ -808,7 +833,7 @@ bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) - bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) + bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1, store_pc=True) bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -22,7 +22,7 @@ def step_in_interp(ctxt): # due to missing resets in between tests interp._loop = False try: - retval = interp.step(ctxt) + retval = interp.step_context(ctxt) if retval is not None: return retval.w_self() except interpreter.Return, nlr: @@ -1013,7 +1013,7 @@ assert False class StackTestInterpreter(TestInterpreter): - def stack_frame(self, w_frame, may_interrupt=True): + def stack_frame(self, w_frame, may_interrupt=True, fresh_context=False): stack_depth = self.max_stack_depth - self.remaining_stack_depth for i in range(stack_depth + 1): assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' diff 
--git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -372,12 +372,12 @@ assert isinstance(s_ctx, shadow.MethodContextShadow) assert s_ctx.top().is_same_object(space.w_true) - interp.step(s_ctx) - interp.step(s_ctx) + interp.step_context(s_ctx) + interp.step_context(s_ctx) assert s_ctx.top().value == 1 - interp.step(s_ctx) + interp.step_context(s_ctx) assert s_ctx.top().value == 2 - interp.step(s_ctx) + interp.step_context(s_ctx) assert s_ctx.top().value == 3 def test_primitive_perform_with_args(): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -673,7 +673,7 @@ def quick_check_for_interrupt(s_frame, dec=1): raise Context_switched - def step(s_frame): + def step(s_frame, pc): raise Stepping w_frame, s_initial_context = new_frame("") diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -81,12 +81,20 @@ self._loop = True return interpreter.Interpreter.loop(self, w_active_context) - def stack_frame(self, s_new_frame, may_context_switch=True): + def stack_frame(self, s_new_frame, may_context_switch=True, fresh_context=False): if not self._loop: + self.s_new_frame = s_new_frame return s_new_frame # this test is done to not loop in test, # but rather step just once where wanted - return interpreter.Interpreter.stack_frame(self, s_new_frame, may_context_switch) - + return interpreter.Interpreter.stack_frame(self, s_new_frame, + may_context_switch=may_context_switch, fresh_context=fresh_context) + + def step_context(self, s_context): + self.s_new_frame = None + new_pc = self.step(s_context, s_context.pc()) + s_context.store_pc(new_pc) + return self.s_new_frame # None, if we're still in the same frame. 
+ class BootstrappedObjSpace(objspace.ObjSpace): def bootstrap(self): From noreply at buildbot.pypy.org Wed May 14 15:40:46 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 May 2014 15:40:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: assert not true anymore Message-ID: <20140514134046.C9DA21C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71506:d1454093dd48 Date: 2014-05-14 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d1454093dd48/ Log: assert not true anymore diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -183,7 +183,7 @@ } else { /* In an atomic transaction */ - assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); + //assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); counter = v_counter; } From noreply at buildbot.pypy.org Wed May 14 18:02:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 14 May 2014 18:02:15 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix annoying little crasher where a collection occurs while we wait to become Message-ID: <20140514160215.F3DE31D237F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1210:ae21b6a2a892 Date: 2014-05-14 18:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/ae21b6a2a892/ Log: fix annoying little crasher where a collection occurs while we wait to become inevitable. this caused the marker_inev fetched before that not to be traced, but we may still read it afterwards. 
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -209,9 +209,7 @@ STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; -#ifndef NDEBUG - STM_PSEGMENT->marker_inev[1] = 99999999999999999L; -#endif + STM_PSEGMENT->marker_inev[1] = 0; if (jmpbuf == NULL) marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR @@ -481,6 +479,9 @@ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; + /* marker_inev is not needed anymore */ + STM_PSEGMENT->marker_inev[1] = 0; + /* reset these lists to NULL for the next transaction */ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -431,7 +431,7 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + if (get_priv_segment(j)->marker_inev[1]) { uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; mark_visit_object((object_t *)marker_inev_obj, base); } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -218,9 +218,9 @@ acquire_privatization_lock(); synchronize_object_now(obj); release_privatization_lock(); + } else { + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } - else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } /* the list could have moved while appending */ @@ -244,7 +244,7 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + if (STM_PSEGMENT->marker_inev[1]) { uintptr_t *pmarker_inev_obj = (uintptr_t *) REAL_ADDRESS(STM_SEGMENT->segment_base, &STM_PSEGMENT->marker_inev[1]); @@ -341,8 +341,9 
@@ collect_modified_old_objects(); num_old = 0; } - else + else { num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + } collect_roots_from_markers(num_old); From noreply at buildbot.pypy.org Wed May 14 18:59:47 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 18:59:47 +0200 (CEST) Subject: [pypy-commit] pypy default: unsplit some class defs Message-ID: <20140514165947.EC9AE1C0320@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71507:8a8fadaead94 Date: 2014-05-14 17:59 +0100 http://bitbucket.org/pypy/pypy/changeset/8a8fadaead94/ Log: unsplit some class defs diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -37,91 +37,6 @@ startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) - -class AbstractCharRepr(AbstractStringRepr): - def rtype_method_lower(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_lower_char, v_chr) - - def rtype_method_upper(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_upper_char, v_chr) - - -class AbstractUniCharRepr(AbstractStringRepr): - pass - -class AbstractUnicodeRepr(AbstractStringRepr): - - def __init__(self, *args): - AbstractStringRepr.__init__(self, *args) - self.runicode_encode_utf_8 = None - - def ensure_ll_encode_utf8(self): - from rpython.rlib.runicode import unicode_encode_utf_8_impl - self.runicode_encode_utf_8 = func_with_new_name( - unicode_encode_utf_8_impl, 'runicode_encode_utf_8') - - def rtype_method_upper(self, hop): - raise TypeError("Cannot do toupper on unicode string") - - def rtype_method_lower(self, hop): - raise TypeError("Cannot do tolower on unicode string") - - @jit.elidable - def ll_encode_utf8(self, ll_s): - from rpython.rtyper.annlowlevel 
import hlunicode - s = hlunicode(ll_s) - assert s is not None - bytes = self.runicode_encode_utf_8( - s, len(s), 'strict', - errorhandler=self.ll_raise_unicode_exception_encode, - allow_surrogates=False) - return self.ll.llstr(bytes) - - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, - startingpos, endingpos): - raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) - -class __extend__(annmodel.SomeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import string_repr - return string_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unicode_repr - return unicode_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeChar): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import char_repr - return char_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeCodePoint): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unichar_repr - return unichar_repr - - def rtyper_makekey(self): - return self.__class__, - - -class __extend__(AbstractStringRepr): - def _str_reprs(self, hop): return hop.args_r[0].repr, hop.args_r[1].repr @@ -407,7 +322,7 @@ return hop.inputconst(hop.r_result, hop.s_result.const) repr = hop.args_r[0].repr v_str = hop.inputarg(repr, 0) - if repr == hop.r_result: # the argument is a unicode string already + if repr == hop.r_result: # the argument is a unicode string already hop.exception_cannot_occur() return v_str hop.exception_is_here() @@ -447,7 +362,46 @@ else: return self.ll.ll_constant('None') -class __extend__(AbstractUnicodeRepr): + def rtype_getslice(r_str, hop): + string_repr = r_str.repr + v_str = hop.inputarg(string_repr, arg=0) + kind, vlist = hop.decompose_slice_args() + ll_fn = 
getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) + return hop.gendirectcall(ll_fn, v_str, *vlist) + + +class AbstractUnicodeRepr(AbstractStringRepr): + + def __init__(self, *args): + AbstractStringRepr.__init__(self, *args) + self.runicode_encode_utf_8 = None + + def ensure_ll_encode_utf8(self): + from rpython.rlib.runicode import unicode_encode_utf_8_impl + self.runicode_encode_utf_8 = func_with_new_name( + unicode_encode_utf_8_impl, 'runicode_encode_utf_8') + + def rtype_method_upper(self, hop): + raise TypeError("Cannot do toupper on unicode string") + + def rtype_method_lower(self, hop): + raise TypeError("Cannot do tolower on unicode string") + + @jit.elidable + def ll_encode_utf8(self, ll_s): + from rpython.rtyper.annlowlevel import hlunicode + s = hlunicode(ll_s) + assert s is not None + bytes = self.runicode_encode_utf_8( + s, len(s), 'strict', + errorhandler=self.ll_raise_unicode_exception_encode, + allow_surrogates=False) + return self.ll.llstr(bytes) + + def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + startingpos, endingpos): + raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) + def rtype_method_encode(self, hop): if not hop.args_s[1].is_constant(): raise TyperError("encoding must be constant") @@ -468,6 +422,117 @@ else: raise TyperError("encoding %s not implemented" % (encoding, )) +class BaseCharReprMixin(object): + + def convert_const(self, value): + if not isinstance(value, str) or len(value) != 1: + raise TyperError("not a character: %r" % (value,)) + return value + + def get_ll_eq_function(self): + return None + + def get_ll_hash_function(self): + return self.ll.ll_char_hash + + get_ll_fasthash_function = get_ll_hash_function + + def rtype_len(_, hop): + return hop.inputconst(Signed, 1) + + def rtype_bool(_, hop): + assert not hop.args_s[0].can_be_None + return hop.inputconst(Bool, True) + + def rtype_ord(_, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + return 
hop.genop('cast_char_to_int', vlist, resulttype=Signed) + + def _rtype_method_isxxx(_, llfn, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + hop.exception_cannot_occur() + return hop.gendirectcall(llfn, vlist[0]) + + def rtype_method_isspace(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) + + def rtype_method_isdigit(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) + + def rtype_method_isalpha(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) + + def rtype_method_isalnum(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) + + def rtype_method_isupper(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) + + def rtype_method_islower(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) + + +class AbstractCharRepr(BaseCharReprMixin, AbstractStringRepr): + def rtype_method_lower(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_lower_char, v_chr) + + def rtype_method_upper(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_upper_char, v_chr) + + def ll_str(self, ch): + return self.ll.ll_chr2str(ch) + + +class AbstractUniCharRepr(BaseCharReprMixin, AbstractStringRepr): + + def ll_str(self, ch): + # xxx suboptimal, maybe + return str(unicode(ch)) + + def ll_unicode(self, ch): + return unicode(ch) + + +class __extend__(annmodel.SomeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr + + def 
rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeChar): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeCodePoint): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + + def rtyper_makekey(self): + return self.__class__, + class __extend__(pairtype(AbstractStringRepr, Repr)): def rtype_mod((r_str, _), hop): @@ -475,6 +540,7 @@ # overriding rtype_mod() below return r_str.ll.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + class __extend__(pairtype(AbstractStringRepr, FloatRepr)): def rtype_mod(_, hop): from rpython.rtyper.lltypesystem.rstr import do_stringformat @@ -520,15 +586,6 @@ rtype_inplace_mul = rtype_mul -class __extend__(AbstractStringRepr): - - def rtype_getslice(r_str, hop): - string_repr = r_str.repr - v_str = hop.inputarg(string_repr, arg=0) - kind, vlist = hop.decompose_slice_args() - ll_fn = getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) - return hop.gendirectcall(ll_fn, v_str, *vlist) - class __extend__(pairtype(AbstractStringRepr, AbstractStringRepr)): def rtype_add((r_str1, r_str2), hop): str1_repr = r_str1.repr @@ -592,65 +649,6 @@ return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(AbstractCharRepr): - def ll_str(self, ch): - return self.ll.ll_chr2str(ch) - -class __extend__(AbstractUniCharRepr): - def ll_str(self, ch): - # xxx suboptimal, maybe - return str(unicode(ch)) - - def ll_unicode(self, ch): - return unicode(ch) - -class __extend__(AbstractCharRepr, - AbstractUniCharRepr): - - def convert_const(self, value): - if not isinstance(value, str) or len(value) != 1: - raise TyperError("not a character: %r" % (value,)) - return value - - def get_ll_eq_function(self): - return None - - def get_ll_hash_function(self): - return self.ll.ll_char_hash - - 
get_ll_fasthash_function = get_ll_hash_function - - def rtype_len(_, hop): - return hop.inputconst(Signed, 1) - - def rtype_bool(_, hop): - assert not hop.args_s[0].can_be_None - return hop.inputconst(Bool, True) - - def rtype_ord(_, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - return hop.genop('cast_char_to_int', vlist, resulttype=Signed) - - def _rtype_method_isxxx(_, llfn, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - hop.exception_cannot_occur() - return hop.gendirectcall(llfn, vlist[0]) - - def rtype_method_isspace(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) - def rtype_method_isdigit(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) - def rtype_method_isalpha(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) - def rtype_method_isalnum(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) - def rtype_method_isupper(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) - def rtype_method_islower(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) - class __extend__(pairtype(AbstractCharRepr, IntegerRepr), pairtype(AbstractUniCharRepr, IntegerRepr)): From noreply at buildbot.pypy.org Wed May 14 19:24:43 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 May 2014 19:24:43 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: consolidate overload tests with CPython/cppyy and add void* use for unknown classes Message-ID: <20140514172443.ED18D1C044C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71508:a2be4d10d7dd Date: 2014-05-13 11:34 -0700 http://bitbucket.org/pypy/pypy/changeset/a2be4d10d7dd/ Log: consolidate overload tests with CPython/cppyy and add void* use for unknown classes diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ 
b/pypy/module/cppyy/converter.py @@ -751,6 +751,10 @@ if compound == "&": return _converters['const unsigned int&'](space, default) return _converters['unsigned int'](space, default) + elif compound: + # "user knows best": allow any ref or ptr unchecked as-if it was void* + # TODO: issue python warning + return _converters['void*'](space, name) # 5) void converter, which fails on use # diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -17,10 +17,11 @@ pass # overload priorities: lower is preferred -priority = { 'void*' : 100, - 'void**' : 100, - 'float' : 30, - 'double' : 10, } +priority = { 'void*' : 100, + 'void**' : 100, + 'float' : 30, + 'long double' : 15, + 'double' : 10, } from rpython.rlib.listsort import make_timsort_class CPPMethodBaseTimSort = make_timsort_class() @@ -202,7 +203,7 @@ if self.converters is None: try: self._setup(cppthis) - except Exception, e: + except Exception: pass # some calls, e.g. for ptr-ptr or reference need a local array to store data for @@ -271,6 +272,7 @@ self.uses_local = True break + return # Each CPPMethod corresponds one-to-one to a C++ equivalent and cppthis # has been offset to the matching class. Hence, the libffi pointer is # uniquely defined and needs to be setup only once. 
@@ -392,8 +394,12 @@ def priority(self): total_arg_priority = 0 - for p in [priority.get(arg_type, 0) for arg_type, arg_dflt in self.arg_defs]: - total_arg_priority += p + for arg_type, arg_dflt in self.arg_defs: + try: + total_arg_priority += priority[arg_type] + except KeyError: + if arg_type[-1] == '&': # causes ptr preference over ref + total_arg_priority += 1 return total_arg_priority def __del__(self): @@ -788,7 +794,7 @@ for f in overload.functions: if 0 < f.signature().find(sig): return W_CPPOverload(self.space, self, [f]) - raise OperationError(self.space.w_TypeError, self.space.wrap("no overload matches signature")) + raise OperationError(self.space.w_LookupError, self.space.wrap("no overload matches signature")) def missing_attribute_error(self, name): return OperationError( diff --git a/pypy/module/cppyy/test/overloads.cxx b/pypy/module/cppyy/test/overloads.cxx --- a/pypy/module/cppyy/test/overloads.cxx +++ b/pypy/module/cppyy/test/overloads.cxx @@ -1,56 +1,69 @@ #include "overloads.h" -a_overload::a_overload() { i1 = 42; i2 = -1; } +//=========================================================================== +OverloadA::OverloadA() { i1 = 42; i2 = -1; } -ns_a_overload::a_overload::a_overload() { i1 = 88; i2 = -34; } -int ns_a_overload::b_overload::f(const std::vector* v) { return (*v)[0]; } +NamespaceA::OverloadA::OverloadA() { i1 = 88; i2 = -34; } +int NamespaceA::OverloadB::f(const std::vector* v) { return (*v)[0]; } -ns_b_overload::a_overload::a_overload() { i1 = -33; i2 = 89; } +NamespaceB::OverloadA::OverloadA() { i1 = -33; i2 = 89; } -b_overload::b_overload() { i1 = -2; i2 = 13; } +OverloadB::OverloadB() { i1 = -2; i2 = 13; } -c_overload::c_overload() {} -int c_overload::get_int(a_overload* a) { return a->i1; } -int c_overload::get_int(ns_a_overload::a_overload* a) { return a->i1; } -int c_overload::get_int(ns_b_overload::a_overload* a) { return a->i1; } -int c_overload::get_int(short* p) { return *p; } -int c_overload::get_int(b_overload* b) { 
return b->i2; } -int c_overload::get_int(int* p) { return *p; } -d_overload::d_overload() {} -int d_overload::get_int(int* p) { return *p; } -int d_overload::get_int(b_overload* b) { return b->i2; } -int d_overload::get_int(short* p) { return *p; } -int d_overload::get_int(ns_b_overload::a_overload* a) { return a->i1; } -int d_overload::get_int(ns_a_overload::a_overload* a) { return a->i1; } -int d_overload::get_int(a_overload* a) { return a->i1; } +//=========================================================================== +OverloadC::OverloadC() {} +int OverloadC::get_int(OverloadA* a) { return a->i1; } +int OverloadC::get_int(NamespaceA::OverloadA* a) { return a->i1; } +int OverloadC::get_int(NamespaceB::OverloadA* a) { return a->i1; } +int OverloadC::get_int(short* p) { return *p; } +int OverloadC::get_int(OverloadB* b) { return b->i2; } +int OverloadC::get_int(int* p) { return *p; } -more_overloads::more_overloads() {} -std::string more_overloads::call(const aa_ol&) { return "aa_ol"; } -std::string more_overloads::call(const bb_ol&, void* n) { n = 0; return "bb_ol"; } -std::string more_overloads::call(const cc_ol&) { return "cc_ol"; } -std::string more_overloads::call(const dd_ol&) { return "dd_ol"; } +//=========================================================================== +OverloadD::OverloadD() {} +int OverloadD::get_int(int* p) { return *p; } +int OverloadD::get_int(OverloadB* b) { return b->i2; } +int OverloadD::get_int(short* p) { return *p; } +int OverloadD::get_int(NamespaceB::OverloadA* a) { return a->i1; } +int OverloadD::get_int(NamespaceA::OverloadA* a) { return a->i1; } +int OverloadD::get_int(OverloadA* a) { return a->i1; } -std::string more_overloads::call_unknown(const dd_ol&) { return "dd_ol"; } -std::string more_overloads::call(double) { return "double"; } -std::string more_overloads::call(int) { return "int"; } -std::string more_overloads::call1(int) { return "int"; } -std::string more_overloads::call1(double) { return "double"; } 
+//=========================================================================== +OlBB* get_OlBB() { return (OlBB*)0; } +OlDD* get_OlDD() { return (OlDD*)0; } -more_overloads2::more_overloads2() {} -std::string more_overloads2::call(const bb_ol&) { return "bb_olref"; } -std::string more_overloads2::call(const bb_ol*) { return "bb_olptr"; } +//=========================================================================== +MoreOverloads::MoreOverloads() {} +std::string MoreOverloads::call(const OlAA&) { return "OlAA"; } +std::string MoreOverloads::call(const OlBB&, void* n) { n = 0; return "OlBB"; } +std::string MoreOverloads::call(const OlCC&) { return "OlCC"; } +std::string MoreOverloads::call(const OlDD&) { return "OlDD"; } -std::string more_overloads2::call(const dd_ol*, int) { return "dd_olptr"; } -std::string more_overloads2::call(const dd_ol&, int) { return "dd_olref"; } +std::string MoreOverloads::call_unknown(const OlDD&) { return "OlDD"; } +std::string MoreOverloads::call(double) { return "double"; } +std::string MoreOverloads::call(int) { return "int"; } +std::string MoreOverloads::call1(int) { return "int"; } +std::string MoreOverloads::call1(double) { return "double"; } -double calc_mean(long n, const float* a) { return calc_mean(n, a); } -double calc_mean(long n, const double* a) { return calc_mean(n, a); } -double calc_mean(long n, const int* a) { return calc_mean(n, a); } -double calc_mean(long n, const short* a) { return calc_mean(n, a); } -double calc_mean(long n, const long* a) { return calc_mean(n, a); } + +//=========================================================================== +MoreOverloads2::MoreOverloads2() {} +std::string MoreOverloads2::call(const OlBB&) { return "OlBB&"; } +std::string MoreOverloads2::call(const OlBB*) { return "OlBB*"; } + +std::string MoreOverloads2::call(const OlDD*, int) { return "OlDD*"; } +std::string MoreOverloads2::call(const OlDD&, int) { return "OlDD&"; } + + 
+//=========================================================================== +double calc_mean(long n, const float* a) { return calc_mean_templ(n, a); } +double calc_mean(long n, const double* a) { return calc_mean_templ(n, a); } +double calc_mean(long n, const int* a) { return calc_mean_templ(n, a); } +double calc_mean(long n, const short* a) { return calc_mean_templ(n, a); } +double calc_mean(long n, const long* a) { return calc_mean_templ(n, a); } diff --git a/pypy/module/cppyy/test/overloads.h b/pypy/module/cppyy/test/overloads.h --- a/pypy/module/cppyy/test/overloads.h +++ b/pypy/module/cppyy/test/overloads.h @@ -1,77 +1,89 @@ #include #include -class a_overload { + +//=========================================================================== +class OverloadA { public: - a_overload(); + OverloadA(); int i1, i2; }; -namespace ns_a_overload { - class a_overload { +namespace NamespaceA { + class OverloadA { public: - a_overload(); + OverloadA(); int i1, i2; }; - class b_overload { + class OverloadB { public: int f(const std::vector* v); }; } -namespace ns_b_overload { - class a_overload { +namespace NamespaceB { + class OverloadA { public: - a_overload(); + OverloadA(); int i1, i2; }; } -class b_overload { +class OverloadB { public: - b_overload(); + OverloadB(); int i1, i2; }; -class c_overload { + +//=========================================================================== +class OverloadC { public: - c_overload(); - int get_int(a_overload* a); - int get_int(ns_a_overload::a_overload* a); - int get_int(ns_b_overload::a_overload* a); + OverloadC(); + int get_int(OverloadA* a); + int get_int(NamespaceA::OverloadA* a); + int get_int(NamespaceB::OverloadA* a); int get_int(short* p); - int get_int(b_overload* b); + int get_int(OverloadB* b); int get_int(int* p); }; -class d_overload { + +//=========================================================================== +class OverloadD { public: - d_overload(); + OverloadD(); // int get_int(void* p) { return 
*(int*)p; } int get_int(int* p); - int get_int(b_overload* b); + int get_int(OverloadB* b); int get_int(short* p); - int get_int(ns_b_overload::a_overload* a); - int get_int(ns_a_overload::a_overload* a); - int get_int(a_overload* a); + int get_int(NamespaceB::OverloadA* a); + int get_int(NamespaceA::OverloadA* a); + int get_int(OverloadA* a); }; -class aa_ol {}; -class bb_ol; -class cc_ol {}; -class dd_ol; +//=========================================================================== +class OlAA {}; +class OlBB; +struct OlCC {}; +struct OlDD; -class more_overloads { +OlBB* get_OlBB(); +OlDD* get_OlDD(); + + +//=========================================================================== +class MoreOverloads { public: - more_overloads(); - std::string call(const aa_ol&); - std::string call(const bb_ol&, void* n=0); - std::string call(const cc_ol&); - std::string call(const dd_ol&); + MoreOverloads(); + std::string call(const OlAA&); + std::string call(const OlBB&, void* n=0); + std::string call(const OlCC&); + std::string call(const OlDD&); - std::string call_unknown(const dd_ol&); + std::string call_unknown(const OlDD&); std::string call(double); std::string call(int); @@ -79,18 +91,28 @@ std::string call1(double); }; -class more_overloads2 { + +//=========================================================================== +class MoreOverloads2 { public: - more_overloads2(); - std::string call(const bb_ol&); - std::string call(const bb_ol*); + MoreOverloads2(); + std::string call(const OlBB&); + std::string call(const OlBB*); - std::string call(const dd_ol*, int); - std::string call(const dd_ol&, int); + std::string call(const OlDD*, int); + std::string call(const OlDD&, int); }; + +//=========================================================================== +double calc_mean(long n, const float* a); +double calc_mean(long n, const double* a); +double calc_mean(long n, const int* a); +double calc_mean(long n, const short* a); +double calc_mean(long n, const long* 
a); + template -double calc_mean(long n, const T* a) { +double calc_mean_templ(long n, const T* a) { double sum = 0., sumw = 0.; const T* end = a+n; while (a != end) { @@ -101,8 +123,33 @@ return sum/sumw; } -double calc_mean(long n, const float* a); -double calc_mean(long n, const double* a); -double calc_mean(long n, const int* a); -double calc_mean(long n, const short* a); -double calc_mean(long n, const long* a); +template double calc_mean_templ (long, const float*); +template double calc_mean_templ(long, const double*); +template double calc_mean_templ (long, const int*); +template double calc_mean_templ (long, const short*); +template double calc_mean_templ (long, const long*); + + +//=========================================================================== +#define STLTYPE_INSTANTIATION(STLTYPE, TTYPE, N) \ + std::STLTYPE STLTYPE##_##N; \ + std::STLTYPE::iterator STLTYPE##_##N##_i; \ + std::STLTYPE::const_iterator STLTYPE##_##N##_ci + +#define STLTYPE_INSTANTIATION2(STLTYPE, TTYPE1, TTYPE2, N) \ + std::STLTYPE STLTYPE##_##N; \ + std::pair STLTYPE##_##N##_p; \ + std::pair STLTYPE##_##N##_cp; \ + std::STLTYPE::iterator STLTYPE##_##N##_i; \ + std::STLTYPE::const_iterator STLTYPE##_##N##_ci + + +namespace { + + struct _CppyyVectorInstances { + + STLTYPE_INSTANTIATION(vector, int, 1); + + }; + +} diff --git a/pypy/module/cppyy/test/overloads.xml b/pypy/module/cppyy/test/overloads.xml --- a/pypy/module/cppyy/test/overloads.xml +++ b/pypy/module/cppyy/test/overloads.xml @@ -1,16 +1,22 @@ + - + - - + + - - - + + + + + + + + diff --git a/pypy/module/cppyy/test/overloads_LinkDef.h b/pypy/module/cppyy/test/overloads_LinkDef.h --- a/pypy/module/cppyy/test/overloads_LinkDef.h +++ b/pypy/module/cppyy/test/overloads_LinkDef.h @@ -4,24 +4,31 @@ #pragma link off all classes; #pragma link off all functions; -#pragma link C++ class a_overload; -#pragma link C++ class b_overload; -#pragma link C++ class c_overload; -#pragma link C++ class d_overload; +#pragma link C++ class 
OverloadA; +#pragma link C++ class OverloadB; +#pragma link C++ class OverloadC; +#pragma link C++ class OverloadD; -#pragma link C++ namespace ns_a_overload; -#pragma link C++ class ns_a_overload::a_overload; -#pragma link C++ class ns_a_overload::b_overload; +#pragma link C++ namespace NamespaceA; +#pragma link C++ class NamespaceA::OverloadA; +#pragma link C++ class NamespaceA::OverloadB; -#pragma link C++ class ns_b_overload; -#pragma link C++ class ns_b_overload::a_overload; +#pragma link C++ class NamespaceB; +#pragma link C++ class NamespaceB::OverloadA; -#pragma link C++ class aa_ol; -#pragma link C++ class cc_ol; +#pragma link C++ class OlAA; +#pragma link C++ class OlCC; +#pragma link C++ function get_OlBB; +#pragma link C++ function get_OlDD; -#pragma link C++ class more_overloads; -#pragma link C++ class more_overloads2; +#pragma link C++ class MoreOverloads; +#pragma link C++ class MoreOverloads2; #pragma link C++ function calc_mean; +#pragma link C++ function calc_mean_templ; +#pragma link C++ function calc_mean_templ; +#pragma link C++ function calc_mean_templ; +#pragma link C++ function calc_mean_templ; +#pragma link C++ function calc_mean_templ; #endif diff --git a/pypy/module/cppyy/test/test_overloads.py b/pypy/module/cppyy/test/test_overloads.py --- a/pypy/module/cppyy/test/test_overloads.py +++ b/pypy/module/cppyy/test/test_overloads.py @@ -22,54 +22,54 @@ return cppyy.load_reflection_info(%r)""" % (test_dct, )) def test01_class_based_overloads(self): - """Test functions overloaded on different C++ clases""" + """Functions overloaded on different C++ class arguments""" import cppyy - a_overload = cppyy.gbl.a_overload - b_overload = cppyy.gbl.b_overload - c_overload = cppyy.gbl.c_overload - d_overload = cppyy.gbl.d_overload + OverloadA = cppyy.gbl.OverloadA + OverloadB = cppyy.gbl.OverloadB + OverloadC = cppyy.gbl.OverloadC + OverloadD = cppyy.gbl.OverloadD - ns_a_overload = cppyy.gbl.ns_a_overload - ns_b_overload = cppyy.gbl.ns_b_overload + 
NamespaceA = cppyy.gbl.NamespaceA + NamespaceB = cppyy.gbl.NamespaceB - assert c_overload().get_int(a_overload()) == 42 - assert c_overload().get_int(b_overload()) == 13 - assert d_overload().get_int(a_overload()) == 42 - assert d_overload().get_int(b_overload()) == 13 + assert OverloadC().get_int(OverloadA()) == 42 + assert OverloadC().get_int(OverloadB()) == 13 + assert OverloadD().get_int(OverloadA()) == 42 + assert OverloadD().get_int(OverloadB()) == 13 - assert c_overload().get_int(ns_a_overload.a_overload()) == 88 - assert c_overload().get_int(ns_b_overload.a_overload()) == -33 + assert OverloadC().get_int(NamespaceA.OverloadA()) == 88 + assert OverloadC().get_int(NamespaceB.OverloadA()) == -33 - assert d_overload().get_int(ns_a_overload.a_overload()) == 88 - assert d_overload().get_int(ns_b_overload.a_overload()) == -33 + assert OverloadD().get_int(NamespaceA.OverloadA()) == 88 + assert OverloadD().get_int(NamespaceB.OverloadA()) == -33 def test02_class_based_overloads_explicit_resolution(self): - """Test explicitly resolved function overloads""" + """Explicitly resolved function overloads""" import cppyy - a_overload = cppyy.gbl.a_overload - b_overload = cppyy.gbl.b_overload - c_overload = cppyy.gbl.c_overload - d_overload = cppyy.gbl.d_overload + OverloadA = cppyy.gbl.OverloadA + OverloadB = cppyy.gbl.OverloadB + OverloadC = cppyy.gbl.OverloadC + OverloadD = cppyy.gbl.OverloadD - ns_a_overload = cppyy.gbl.ns_a_overload + NamespaceA = cppyy.gbl.NamespaceA - c = c_overload() + c = OverloadC() raises(TypeError, c.__dispatch__, 'get_int', 12) - raises(TypeError, c.__dispatch__, 'get_int', 'does_not_exist') - assert c.__dispatch__('get_int', 'a_overload*')(a_overload()) == 42 - assert c.__dispatch__('get_int', 'b_overload*')(b_overload()) == 13 + raises(LookupError, c.__dispatch__, 'get_int', 'does_not_exist') + assert c.__dispatch__('get_int', 'OverloadA*')(OverloadA()) == 42 + assert c.__dispatch__('get_int', 'OverloadB*')(OverloadB()) == 13 - assert 
c_overload().__dispatch__('get_int', 'a_overload*')(a_overload()) == 42 - # TODO: #assert c_overload.__dispatch__('get_int', 'b_overload*')(c, b_overload()) == 13 + assert OverloadC().__dispatch__('get_int', 'OverloadA*')(OverloadA()) == 42 + # TODO: #assert c_overload.__dispatch__('get_int', 'OverloadB*')(c, OverloadB()) == 13 - d = d_overload() - assert d.__dispatch__('get_int', 'a_overload*')(a_overload()) == 42 - assert d.__dispatch__('get_int', 'b_overload*')(b_overload()) == 13 + d = OverloadD() + assert d.__dispatch__('get_int', 'OverloadA*')(OverloadA()) == 42 + assert d.__dispatch__('get_int', 'OverloadB*')(OverloadB()) == 13 - nb = ns_a_overload.b_overload() - raises(TypeError, nb.f, c_overload()) + nb = NamespaceA.OverloadB() + raises(TypeError, nb.f, OverloadC()) def test03_fragile_class_based_overloads(self): """Test functions overloaded on void* and non-existing classes""" @@ -77,58 +77,63 @@ # TODO: make Reflex generate unknown classes ... import cppyy - more_overloads = cppyy.gbl.more_overloads - aa_ol = cppyy.gbl.aa_ol -# bb_ol = cppyy.gbl.bb_ol - cc_ol = cppyy.gbl.cc_ol -# dd_ol = cppyy.gbl.dd_ol + MoreOverloads = cppyy.gbl.MoreOverloads - assert more_overloads().call(aa_ol()) == "aa_ol" -# assert more_overloads().call(bb_ol()) == "dd_ol" # <- bb_ol has an unknown + void* - assert more_overloads().call(cc_ol()) == "cc_ol" -# assert more_overloads().call(dd_ol()) == "dd_ol" # <- dd_ol has an unknown + OlAA = cppyy.gbl.OlAA +# OlBB = cppyy.gbl.OlBB + OlCC = cppyy.gbl.OlCC +# OlDD = cppyy.gbl.OlDD + + from cppyy.gbl import get_OlBB, get_OlDD + + # first verify that BB and DD are indeed unknown +# raises(RuntimeError, OlBB) +# raises(RuntimeError, OlDD ) + + # then try overloads based on them + assert MoreOverloads().call(OlAA()) == "OlAA" + assert MoreOverloads().call(get_OlBB()) == "OlDD" # <- has an unknown + void* + assert MoreOverloads().call(OlCC()) == "OlCC" + assert MoreOverloads().call(get_OlDD()) == "OlDD" # <- has an unknown def 
test04_fully_fragile_overloads(self): - """Test that unknown* is preferred over unknown&""" - - # TODO: make Reflex generate unknown classes ... - return + """An unknown* is preferred over unknown&""" import cppyy - more_overloads2 = cppyy.gbl.more_overloads2 - bb_ol = cppyy.gbl.bb_ol - dd_ol = cppyy.gbl.dd_ol + MoreOverloads2 = cppyy.gbl.MoreOverloads2 - assert more_overloads2().call(bb_ol()) == "bb_olptr" - assert more_overloads2().call(dd_ol(), 1) == "dd_olptr" + from cppyy.gbl import get_OlBB, get_OlDD + + assert MoreOverloads2().call(get_OlBB()) == "OlBB*" + assert MoreOverloads2().call(get_OlDD(), 1) == "OlDD*" def test05_array_overloads(self): - """Test functions overloaded on different arrays""" + """Functions overloaded on different arrays""" import cppyy - c_overload = cppyy.gbl.c_overload - d_overload = cppyy.gbl.d_overload + OverloadC = cppyy.gbl.OverloadC + OverloadD = cppyy.gbl.OverloadD from array import array ai = array('i', [525252]) - assert c_overload().get_int(ai) == 525252 - assert d_overload().get_int(ai) == 525252 + assert OverloadC().get_int(ai) == 525252 + assert OverloadD().get_int(ai) == 525252 ah = array('h', [25]) - assert c_overload().get_int(ah) == 25 - assert d_overload().get_int(ah) == 25 + assert OverloadC().get_int(ah) == 25 + assert OverloadD().get_int(ah) == 25 def test06_double_int_overloads(self): - """Test overloads on int/doubles""" + """Overloads on int/doubles""" import cppyy - more_overloads = cppyy.gbl.more_overloads + MoreOverloads = cppyy.gbl.MoreOverloads - assert more_overloads().call(1) == "int" - assert more_overloads().call(1.) == "double" - assert more_overloads().call1(1) == "int" - assert more_overloads().call1(1.) == "double" + assert MoreOverloads().call(1) == "int" + assert MoreOverloads().call(1.) == "double" + assert MoreOverloads().call1(1) == "int" + assert MoreOverloads().call1(1.) 
== "double" def test07_mean_overloads(self): """Adapted test for array overloading""" @@ -142,3 +147,16 @@ for l in ['f', 'd', 'i', 'h', 'l']: a = array.array(l, numbers) assert round(cmean(len(a), a) - mean, 8) == 0 + + def test08_templated_mean_overloads(self): + """Adapted test for array overloading with templates""" + + import cppyy, array + cmean = cppyy.gbl.calc_mean_templ + + numbers = [8, 2, 4, 2, 4, 2, 4, 4, 1, 5, 6, 3, 7] + mean, median = 4.0, 4.0 + + for l in ['f', 'd', 'i', 'h', 'l']: + a = array.array(l, numbers) + assert round(cmean(len(a), a) - mean, 8) == 0 From noreply at buildbot.pypy.org Wed May 14 19:24:45 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 May 2014 19:24:45 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: change test now that unknown*'s are usuable Message-ID: <20140514172445.355A51C044C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71509:3fe86f7471af Date: 2014-05-14 10:23 -0700 http://bitbucket.org/pypy/pypy/changeset/3fe86f7471af/ Log: change test now that unknown*'s are usuable diff --git a/pypy/module/cppyy/test/fragile.h b/pypy/module/cppyy/test/fragile.h --- a/pypy/module/cppyy/test/fragile.h +++ b/pypy/module/cppyy/test/fragile.h @@ -17,7 +17,7 @@ class C { public: virtual int check() { return (int)'C'; } - void use_no_such(no_such_class*) {} + long use_no_such(no_such_class* p) { return (long)p; } }; class D { diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -55,7 +55,8 @@ assert fragile.C == fragile.C assert fragile.C().check() == ord('C') - raises(TypeError, fragile.C().use_no_such, None) + assert fragile.C().use_no_such(None) == 0 + assert fragile.C().use_no_such(fragile.B().gime_no_such()) == 0 def test03_arguments(self): """Test reporting when providing wrong arguments""" From noreply at buildbot.pypy.org Wed May 14 
19:24:46 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 14 May 2014 19:24:46 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: rename destruct -> __destruct__ to prevent possible clashes Message-ID: <20140514172446.649861C044C@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71510:bef4d3b1cf9b Date: 2014-05-14 10:24 -0700 http://bitbucket.org/pypy/pypy/changeset/bef4d3b1cf9b/ Log: rename destruct -> __destruct__ to prevent possible clashes diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -1181,7 +1181,7 @@ __len__ = interp2app(W_CPPInstance.instance__len__), __cmp__ = interp2app(W_CPPInstance.instance__cmp__), __repr__ = interp2app(W_CPPInstance.instance__repr__), - destruct = interp2app(W_CPPInstance.destruct), + __destruct__ = interp2app(W_CPPInstance.destruct), ) W_CPPInstance.typedef.acceptable_as_base_class = True diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -35,25 +35,25 @@ assert d.m_a == t(11) assert d.m_b == t(22) assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(0) assert d.m_a == t(0) assert d.m_b == t(22) assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(1, 2) assert d.m_a == t(1) assert d.m_b == t(2) assert d.m_c == t(33) - d.destruct() + d.__destruct__() d = defaulter(3, 4, 5) assert d.m_a == t(3) assert d.m_b == t(4) assert d.m_c == t(5) - d.destruct() + d.__destruct__() test_defaulter('short', int) test_defaulter('ushort', int) test_defaulter('int', int) @@ -90,7 +90,7 @@ assert b.m_db == 11.11 assert b.get_base_value() == 11.11 - b.destruct() + b.__destruct__() d = derived_class() assert isinstance(d, derived_class) @@ -117,7 +117,7 @@ assert d.m_db == 11.11 assert d.get_base_value() == 
11.11 - d.destruct() + d.__destruct__() def test03_namespaces(self): """Test access to namespaces and inner classes""" @@ -222,7 +222,7 @@ t1 = gbl.T1(int)() assert t1.m_t1 == 1 assert t1.get_value() == 1 - t1.destruct() + t1.__destruct__() #----- t1 = gbl.T1(int)(11) @@ -231,14 +231,14 @@ t1.m_t1 = 111 assert t1.get_value() == 111 assert t1.m_t1 == 111 - t1.destruct() + t1.__destruct__() #----- t2 = gbl.T2(gbl.T1(int))(gbl.T1(int)(32)) t2.m_t2.m_t1 = 32 assert t2.m_t2.get_value() == 32 assert t2.m_t2.m_t1 == 32 - t2.destruct() + t2.__destruct__() def test05_abstract_classes(self): @@ -299,7 +299,7 @@ b.m_db = 22.22 assert b.m_db == 22.22 - b.destruct() + b.__destruct__() #----- c1 = c_class_1() @@ -320,7 +320,7 @@ assert c1.m_c == 33 assert c1.get_value() == 33 - c1.destruct() + c1.__destruct__() #----- d = d_class() @@ -348,7 +348,7 @@ assert d.m_d == 44 assert d.get_value() == 44 - d.destruct() + d.__destruct__() def test07_pass_by_reference(self): """Test reference passing when using virtual inheritance""" @@ -364,7 +364,7 @@ b.m_a, b.m_b = 11, 22 assert gbl.get_a(b) == 11 assert gbl.get_b(b) == 22 - b.destruct() + b.__destruct__() #----- c = c_class() @@ -372,7 +372,7 @@ assert gbl.get_a(c) == 11 assert gbl.get_b(c) == 22 assert gbl.get_c(c) == 33 - c.destruct() + c.__destruct__() #----- d = d_class() @@ -381,7 +381,7 @@ assert gbl.get_b(d) == 22 assert gbl.get_c(d) == 33 assert gbl.get_d(d) == 44 - d.destruct() + d.__destruct__() def test08_void_pointer_passing(self): """Test passing of variants of void pointer arguments""" @@ -465,8 +465,8 @@ assert not dd1a is dd2 assert not dd1b is dd2 - d2.destruct() - d1.destruct() + d2.__destruct__() + d1.__destruct__() def test11_multi_methods(self): """Test calling of methods from multiple inheritance""" @@ -536,7 +536,7 @@ c1 = cppyy.gbl.create_c1() assert type(c1) == cppyy.gbl.c_class_1 assert c1.m_c == 3 - c1.destruct() + c1.__destruct__() if self.capi_identity == 'CINT': # CINT does not support dynamic casts 
return @@ -544,7 +544,7 @@ c2 = cppyy.gbl.create_c2() assert type(c2) == cppyy.gbl.c_class_2 assert c2.m_c == 3 - c2.destruct() + c2.__destruct__() def test14_new_overloader(self): """Verify that class-level overloaded new/delete are called""" @@ -563,7 +563,7 @@ assert cppyy.gbl.new_overloader.s_instances == 0 nl = cppyy.gbl.new_overloader() assert cppyy.gbl.new_overloader.s_instances == 1 - nl.destruct() + nl.__destruct__() if self.capi_identity == 'CINT': # do not test delete return diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -101,19 +101,19 @@ assert res == 11 res = t.get_overload("addDataToInt").call(e1, -4) assert res == 3 - e1.destruct() + e1.__destruct__() assert t.get_overload("getCount").call(None) == 0 raises(ReferenceError, 't.get_overload("addDataToInt").call(e1, 4)') e1 = t.get_overload(t.type_name).call(None, 7) e2 = t.get_overload(t.type_name).call(None, 8) assert t.get_overload("getCount").call(None) == 2 - e1.destruct() + e1.__destruct__() assert t.get_overload("getCount").call(None) == 1 - e2.destruct() + e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 - e2.destruct() + e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 raises(TypeError, t.get_overload("addDataToInt").call, 41, 4) @@ -144,7 +144,7 @@ e1 = None gc.collect() assert t.get_overload("getCount").call(None) == 1 - e2.destruct() + e2.__destruct__() assert t.get_overload("getCount").call(None) == 0 e2 = None gc.collect() @@ -181,12 +181,12 @@ e = t.get_overload(t.type_name).call(None, 13) res = t.get_overload("addDataToDouble").call(e, 16) assert round(res-29, 8) == 0. - e.destruct() + e.__destruct__() e = t.get_overload(t.type_name).call(None, -13) res = t.get_overload("addDataToDouble").call(e, 16) assert round(res-3, 8) == 0. 
- e.destruct() + e.__destruct__() assert t.get_overload("getCount").call(None) == 0 def test07_method_constcharp(self): @@ -203,7 +203,7 @@ assert res == "54" res = t.get_overload("addToStringValue").call(e, "-12") # TODO: this leaks assert res == "30" - e.destruct() + e.__destruct__() assert t.get_overload("getCount").call(None) == 0 def test08_pass_object_by_pointer(self): @@ -222,8 +222,8 @@ t1.get_overload("setPayload").call(e, pl); assert round(t2.get_overload("getData").call(pl)-50., 8) == 0 - e.destruct() - pl.destruct() + e.__destruct__() + pl.__destruct__() assert t1.get_overload("getCount").call(None) == 0 def test09_return_object_by_pointer(self): @@ -242,6 +242,6 @@ pl2 = t1.get_overload("cyclePayload").call(e, pl1); assert round(t2.get_overload("getData").call(pl2)-50., 8) == 0 - e.destruct() - pl1.destruct() + e.__destruct__() + pl1.__destruct__() assert t1.get_overload("getCount").call(None) == 0 diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -109,7 +109,7 @@ assert not hasattr(CppyyTestData, 'm_bool') assert not hasattr(CppyyTestData, 'm_int') - c.destruct() + c.__destruct__() def test03_instance_data_write_access(self): """Write access to instance public data and verify values""" @@ -223,7 +223,7 @@ for i in range(self.N): assert eval('c.m_%s_array2[i]' % names[j]) == b[i] - c.destruct() + c.__destruct__() def test04_array_passing(self): """Passing of arrays as function arguments""" @@ -261,7 +261,7 @@ assert not c.pass_array(cppyy.gbl.nullptr) raises(Exception, c.pass_array(cppyy.gbl.nullptr).__getitem__, 0) # id. id. 
- c.destruct() + c.__destruct__() def test05_class_read_access(self): """Read access to class public data""" @@ -313,7 +313,7 @@ assert c.s_enum == CppyyTestData.kNothing assert CppyyTestData.s_enum == CppyyTestData.kNothing - c.destruct() + c.__destruct__() def test06_class_data_write_access(self): """Write access to class public data""" @@ -391,7 +391,7 @@ CppyyTestData.s_ldouble = math.pi assert c.s_ldouble == math.pi - c.destruct() + c.__destruct__() def test07_range_access(self): """Integer type ranges""" @@ -407,7 +407,7 @@ raises(ValueError, setattr, c, 'm_uint', -1) raises(ValueError, setattr, c, 'm_ulong', -1) - c.destruct() + c.__destruct__() def test08_type_conversions(self): """Conversions between builtin types""" @@ -425,7 +425,7 @@ raises(TypeError, c.m_int, -1.) raises(TypeError, c.m_int, 1.) - c.destruct() + c.__destruct__() def test09_global_builtin_type(self): """Access to a global builtin type""" @@ -669,7 +669,7 @@ raises(AttributeError, getattr, c, 'm_owns_arrays') - c.destruct() + c.__destruct__() def test18_object_and_pointer_comparisons(self): """Object and pointer comparisons""" diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -84,7 +84,7 @@ assert res == 11 res = instance.addDataToInt(-4) assert res == 3 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 0 raises(ReferenceError, 'instance.addDataToInt(4)') return @@ -92,16 +92,16 @@ instance = example01_class(7) instance2 = example01_class(8) assert example01_class.getCount() == 2 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 1 - instance2.destruct() + instance2.__destruct__() assert example01_class.getCount() == 0 t = self.example01 instance = example01_class(13) res = instance.addDataToDouble(16) assert round(res-29, 8) == 0. 
- instance.destruct() + instance.__destruct__() instance = example01_class(-13) res = instance.addDataToDouble(16) assert round(res-3, 8) == 0. @@ -121,7 +121,7 @@ res = instance.staticAddOneToInt(1L) assert res == 2 - instance.destruct() + instance.__destruct__() assert example01_class.getCount() == 0 def test05_passing_object_by_pointer(self): @@ -143,8 +143,8 @@ e.setPayload(pl) assert round(pl.getData()-14., 8) == 0 - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test06_returning_object_by_pointer(self): @@ -163,8 +163,8 @@ pl2 = e.cyclePayload(pl) assert round(pl2.getData()-14., 8) == 0 - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test07_returning_object_by_value(self): @@ -177,16 +177,16 @@ pl2 = example01_class.staticCopyCyclePayload(pl, 38.) assert pl2.getData() == 38. - pl2.destruct() + pl2.__destruct__() e = example01_class(14) pl2 = e.copyCyclePayload(pl) assert round(pl2.getData()-14., 8) == 0 - pl2.destruct() + pl2.__destruct__() - pl.destruct() - e.destruct() + pl.__destruct__() + e.__destruct__() assert example01_class.getCount() == 0 def test08_global_functions(self): @@ -343,7 +343,7 @@ o = example01() assert type(o) == example01 assert example01.getCount() == 1 - o.destruct() + o.__destruct__() assert example01.getCount() == 0 class MyClass1(example01): @@ -355,7 +355,7 @@ assert isinstance(o, example01) assert example01.getCount() == 1 assert o.myfunc() == 1 - o.destruct() + o.__destruct__() assert example01.getCount() == 0 class MyClass2(example01): @@ -367,7 +367,7 @@ assert type(o) == MyClass2 assert example01.getCount() == 1 assert o.what == 'hi' - o.destruct() + o.__destruct__() assert example01.getCount() == 0 diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -102,7 +102,7 
@@ assert v[i].m_i == i assert len(v) == self.N - v.destruct() + v.__destruct__() def test03_empty_vector_type(self): """Test behavior of empty std::vector""" @@ -112,7 +112,7 @@ v = cppyy.gbl.std.vector(int)() for arg in v: pass - v.destruct() + v.__destruct__() def test04_vector_iteration(self): """Test iteration over an std::vector""" @@ -137,7 +137,7 @@ assert list(v) == [i for i in range(self.N)] - v.destruct() + v.__destruct__() def test05_push_back_iterables_with_iadd(self): """Test usage of += of iterable on push_back-able container""" From noreply at buildbot.pypy.org Wed May 14 20:24:21 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 20:24:21 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: hg merge default Message-ID: <20140514182421.753371C02F3@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: r71511:817c2e321208 Date: 2014-05-14 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/817c2e321208/ Log: hg merge default diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -37,91 +37,6 @@ startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) - -class AbstractCharRepr(AbstractStringRepr): - def rtype_method_lower(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_lower_char, v_chr) - - def rtype_method_upper(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_upper_char, v_chr) - - -class AbstractUniCharRepr(AbstractStringRepr): - pass - -class AbstractUnicodeRepr(AbstractStringRepr): - - def __init__(self, *args): - AbstractStringRepr.__init__(self, *args) - self.runicode_encode_utf_8 = None - - def ensure_ll_encode_utf8(self): - from rpython.rlib.runicode import unicode_encode_utf_8_impl - 
self.runicode_encode_utf_8 = func_with_new_name( - unicode_encode_utf_8_impl, 'runicode_encode_utf_8') - - def rtype_method_upper(self, hop): - raise TypeError("Cannot do toupper on unicode string") - - def rtype_method_lower(self, hop): - raise TypeError("Cannot do tolower on unicode string") - - @jit.elidable - def ll_encode_utf8(self, ll_s): - from rpython.rtyper.annlowlevel import hlunicode - s = hlunicode(ll_s) - assert s is not None - bytes = self.runicode_encode_utf_8( - s, len(s), 'strict', - errorhandler=self.ll_raise_unicode_exception_encode, - allow_surrogates=False) - return self.ll.llstr(bytes) - - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, - startingpos, endingpos): - raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) - -class __extend__(annmodel.SomeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import string_repr - return string_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unicode_repr - return unicode_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeChar): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import char_repr - return char_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeCodePoint): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unichar_repr - return unichar_repr - - def rtyper_makekey(self): - return self.__class__, - - -class __extend__(AbstractStringRepr): - def _str_reprs(self, hop): return hop.args_r[0].repr, hop.args_r[1].repr @@ -407,7 +322,7 @@ return hop.inputconst(hop.r_result, hop.s_result.const) repr = hop.args_r[0].repr v_str = hop.inputarg(repr, 0) - if repr == hop.r_result: # the argument is a unicode string already + if repr == 
hop.r_result: # the argument is a unicode string already hop.exception_cannot_occur() return v_str hop.exception_is_here() @@ -447,7 +362,46 @@ else: return self.ll.ll_constant('None') -class __extend__(AbstractUnicodeRepr): + def rtype_getslice(r_str, hop): + string_repr = r_str.repr + v_str = hop.inputarg(string_repr, arg=0) + kind, vlist = hop.decompose_slice_args() + ll_fn = getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) + return hop.gendirectcall(ll_fn, v_str, *vlist) + + +class AbstractUnicodeRepr(AbstractStringRepr): + + def __init__(self, *args): + AbstractStringRepr.__init__(self, *args) + self.runicode_encode_utf_8 = None + + def ensure_ll_encode_utf8(self): + from rpython.rlib.runicode import unicode_encode_utf_8_impl + self.runicode_encode_utf_8 = func_with_new_name( + unicode_encode_utf_8_impl, 'runicode_encode_utf_8') + + def rtype_method_upper(self, hop): + raise TypeError("Cannot do toupper on unicode string") + + def rtype_method_lower(self, hop): + raise TypeError("Cannot do tolower on unicode string") + + @jit.elidable + def ll_encode_utf8(self, ll_s): + from rpython.rtyper.annlowlevel import hlunicode + s = hlunicode(ll_s) + assert s is not None + bytes = self.runicode_encode_utf_8( + s, len(s), 'strict', + errorhandler=self.ll_raise_unicode_exception_encode, + allow_surrogates=False) + return self.ll.llstr(bytes) + + def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + startingpos, endingpos): + raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) + def rtype_method_encode(self, hop): if not hop.args_s[1].is_constant(): raise TyperError("encoding must be constant") @@ -468,6 +422,117 @@ else: raise TyperError("encoding %s not implemented" % (encoding, )) +class BaseCharReprMixin(object): + + def convert_const(self, value): + if not isinstance(value, str) or len(value) != 1: + raise TyperError("not a character: %r" % (value,)) + return value + + def get_ll_eq_function(self): + return None + + def 
get_ll_hash_function(self): + return self.ll.ll_char_hash + + get_ll_fasthash_function = get_ll_hash_function + + def rtype_len(_, hop): + return hop.inputconst(Signed, 1) + + def rtype_bool(_, hop): + assert not hop.args_s[0].can_be_None + return hop.inputconst(Bool, True) + + def rtype_ord(_, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + return hop.genop('cast_char_to_int', vlist, resulttype=Signed) + + def _rtype_method_isxxx(_, llfn, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + hop.exception_cannot_occur() + return hop.gendirectcall(llfn, vlist[0]) + + def rtype_method_isspace(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) + + def rtype_method_isdigit(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) + + def rtype_method_isalpha(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) + + def rtype_method_isalnum(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) + + def rtype_method_isupper(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) + + def rtype_method_islower(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) + + +class AbstractCharRepr(BaseCharReprMixin, AbstractStringRepr): + def rtype_method_lower(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_lower_char, v_chr) + + def rtype_method_upper(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_upper_char, v_chr) + + def ll_str(self, ch): + return self.ll.ll_chr2str(ch) + + +class AbstractUniCharRepr(BaseCharReprMixin, AbstractStringRepr): + + def ll_str(self, ch): + # xxx suboptimal, maybe + return str(unicode(ch)) + + def ll_unicode(self, ch): + return unicode(ch) + + +class 
__extend__(annmodel.SomeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeChar): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeCodePoint): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + + def rtyper_makekey(self): + return self.__class__, + class __extend__(pairtype(AbstractStringRepr, Repr)): def rtype_mod((r_str, _), hop): @@ -475,6 +540,7 @@ # overriding rtype_mod() below return r_str.ll.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + class __extend__(pairtype(AbstractStringRepr, FloatRepr)): def rtype_mod(_, hop): from rpython.rtyper.lltypesystem.rstr import do_stringformat @@ -520,15 +586,6 @@ rtype_inplace_mul = rtype_mul -class __extend__(AbstractStringRepr): - - def rtype_getslice(r_str, hop): - string_repr = r_str.repr - v_str = hop.inputarg(string_repr, arg=0) - kind, vlist = hop.decompose_slice_args() - ll_fn = getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) - return hop.gendirectcall(ll_fn, v_str, *vlist) - class __extend__(pairtype(AbstractStringRepr, AbstractStringRepr)): def rtype_add((r_str1, r_str2), hop): str1_repr = r_str1.repr @@ -592,65 +649,6 @@ return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(AbstractCharRepr): - def ll_str(self, ch): - return self.ll.ll_chr2str(ch) - -class __extend__(AbstractUniCharRepr): - def ll_str(self, ch): - # xxx suboptimal, maybe - return str(unicode(ch)) - - def 
ll_unicode(self, ch): - return unicode(ch) - -class __extend__(AbstractCharRepr, - AbstractUniCharRepr): - - def convert_const(self, value): - if not isinstance(value, str) or len(value) != 1: - raise TyperError("not a character: %r" % (value,)) - return value - - def get_ll_eq_function(self): - return None - - def get_ll_hash_function(self): - return self.ll.ll_char_hash - - get_ll_fasthash_function = get_ll_hash_function - - def rtype_len(_, hop): - return hop.inputconst(Signed, 1) - - def rtype_bool(_, hop): - assert not hop.args_s[0].can_be_None - return hop.inputconst(Bool, True) - - def rtype_ord(_, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - return hop.genop('cast_char_to_int', vlist, resulttype=Signed) - - def _rtype_method_isxxx(_, llfn, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - hop.exception_cannot_occur() - return hop.gendirectcall(llfn, vlist[0]) - - def rtype_method_isspace(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) - def rtype_method_isdigit(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) - def rtype_method_isalpha(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) - def rtype_method_isalnum(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) - def rtype_method_isupper(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) - def rtype_method_islower(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) - class __extend__(pairtype(AbstractCharRepr, IntegerRepr), pairtype(AbstractUniCharRepr, IntegerRepr)): From noreply at buildbot.pypy.org Wed May 14 20:24:22 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 20:24:22 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: Create LowLevelType._contains_value() Message-ID: <20140514182422.C6EF41C02F3@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: 
r71512:dab361acad27 Date: 2014-05-14 19:22 +0100 http://bitbucket.org/pypy/pypy/changeset/dab361acad27/ Log: Create LowLevelType._contains_value() diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -185,6 +185,11 @@ def _is_varsize(self): return False + def _contains_value(self, value): + if self is Void: + return True + return isCompatibleType(typeOf(value), self) + NFOUND = object() class ContainerType(LowLevelType): diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,8 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, - LowLevelType, isCompatibleType) +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -120,14 +119,9 @@ def convert_const(self, value): "Convert the given constant value to the low-level repr of 'self'." - if self.lowleveltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError, TypeError): - realtype = '???' - if realtype != self.lowleveltype: - raise TyperError("convert_const(self = %r, value = %r)" % ( - self, value)) + if not self.lowleveltype._contains_value(value): + raise TyperError("convert_const(self = %r, value = %r)" % ( + self, value)) return value def get_ll_eq_function(self): @@ -367,18 +361,9 @@ lltype = reqtype else: raise TypeError(repr(reqtype)) - # Void Constants can hold any value; - # non-Void Constants must hold a correctly ll-typed value - if lltype is not Void: - try: - realtype = typeOf(value) - except (AssertionError, AttributeError): - realtype = '???' 
- if not isCompatibleType(realtype, lltype): - raise TyperError("inputconst(reqtype = %s, value = %s):\n" - "expected a %r,\n" - " got a %r" % (reqtype, value, - lltype, realtype)) + if not lltype._contains_value(value): + raise TyperError("inputconst(): expected a %r, got %r" % + (lltype, value)) c = Constant(value) c.concretetype = lltype return c From noreply at buildbot.pypy.org Wed May 14 21:05:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 21:05:12 +0200 (CEST) Subject: [pypy-commit] pypy HopArg: fix r_os_stat, maybe (is this even tested?) Message-ID: <20140514190512.4D0EF1C044C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: HopArg Changeset: r71513:d1c416a40298 Date: 2014-05-14 20:04 +0100 http://bitbucket.org/pypy/pypy/changeset/d1c416a40298/ Log: fix r_os_stat, maybe (is this even tested?) diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -14,6 +14,7 @@ from rpython.rtyper.rmodel import Repr from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.error import TyperError +from rpython.rtyper.rtyper import HopArg from rpython.rtyper.module import ll_os_stat @@ -34,12 +35,12 @@ def redispatch_getfield(self, hop, index): rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + h_index = HopArg(Constant(index), s_index, rtyper.getrepr(s_index)) + h_tuple = HopArg(hop2.args[0].v, self.s_tuple, self.r_tuple) + hop2.args = [h_tuple, h_index] return hop2.dispatch() def rtype_getattr(self, hop): From noreply at buildbot.pypy.org Wed May 14 21:11:20 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 14 May 2014 
21:11:20 +0200 (CEST) Subject: [pypy-commit] pypy default: fix potential UnicodeEncodeErrors under pytest.py --resultlog: the captured Message-ID: <20140514191120.5DBEC1C02F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r71514:ef6d8454c0a6 Date: 2014-05-14 12:10 -0700 http://bitbucket.org/pypy/pypy/changeset/ef6d8454c0a6/ Log: fix potential UnicodeEncodeErrors under pytest.py --resultlog: the captured output may come in as unicode diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -56,6 +56,9 @@ for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) for key, text in sections: + # py.io.StdCaptureFD may send in unicode + if isinstance(text, unicode): + text = text.encode('utf-8') py.builtin.print_(" ", file=self.logfile) py.builtin.print_(" -------------------- %s --------------------" % key.rstrip(), file=self.logfile) From noreply at buildbot.pypy.org Wed May 14 21:34:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 May 2014 21:34:32 +0200 (CEST) Subject: [pypy-commit] pypy default: MSVC link must find all functions, so provide dummy implementations Message-ID: <20140514193432.DE11A1D29B2@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71515:13c05fef6513 Date: 2014-05-14 14:42 +0300 http://bitbucket.org/pypy/pypy/changeset/13c05fef6513/ Log: MSVC link must find all functions, so provide dummy implementations diff --git a/rpython/translator/c/src/allocator.c b/rpython/translator/c/src/allocator.c --- a/rpython/translator/c/src/allocator.c +++ b/rpython/translator/c/src/allocator.c @@ -24,5 +24,10 @@ # include "src/obmalloc.c" #endif +#elif defined _MSC_VER +/* link will fail without some kind of definition for the functions */ + void *PyObject_Malloc(size_t n) { return NULL; } + void *PyObject_Realloc(void *p, size_t n) { return NULL; } + void PyObject_Free(void *p) { } #endif /* PYPY_STANDALONE */ From noreply at 
buildbot.pypy.org Wed May 14 21:34:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 May 2014 21:34:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix shared object suffix Message-ID: <20140514193434.0C1CA1D29B2@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71516:6933e9ded4de Date: 2014-05-14 14:55 +0300 http://bitbucket.org/pypy/pypy/changeset/6933e9ded4de/ Log: fix shared object suffix diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1297,7 +1297,10 @@ t, cbuilder = self.compile(entry_point, shared=True, entrypoints=[f]) + ext_suffix = '.so' + if cbuilder.eci.platform.name == 'msvc': + ext_suffix = '.dll' libname = cbuilder.executable_name.join('..', 'lib' + - cbuilder.modulename + '.so') + cbuilder.modulename + ext_suffix) lib = ctypes.CDLL(str(libname)) assert lib.foo(13) == 16 From noreply at buildbot.pypy.org Wed May 14 21:34:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 May 2014 21:34:35 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test for windows, document and fix test for : in PYPYLOG filename Message-ID: <20140514193435.357121D29B2@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71517:9ab80e27fa27 Date: 2014-05-14 22:31 +0300 http://bitbucket.org/pypy/pypy/changeset/9ab80e27fa27/ Log: fix test for windows, document and fix test for : in PYPYLOG filename diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -100,6 +100,8 @@ ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. + Note that using a : in fname is a bad idea, Windows + users, beware. ``:``\ *fname* Full logging, including ``debug_print``. 
diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -304,8 +304,13 @@ assert " ll_strtod.o" in makefile def test_debug_print_start_stop(self): + import sys from rpython.rtyper.lltypesystem import rffi - + if sys.platform == 'win32': + # ftell(stderr) is a bit different under subprocess.Popen + tell = 0 + else: + tell = -1 def entry_point(argv): x = "got:" debug_start ("mycat") @@ -327,7 +332,7 @@ t, cbuilder = self.compile(entry_point) # check with PYPYLOG undefined out, err = cbuilder.cmdexec("", err=True, env={}) - assert out.strip() == 'got:a.-1.' + assert out.strip() == 'got:a.%d.' % tell assert 'toplevel' in err assert 'mycat' not in err assert 'foo 2 bar 3' not in err @@ -336,7 +341,7 @@ assert 'bok' not in err # check with PYPYLOG defined to an empty string (same as undefined) out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': ''}) - assert out.strip() == 'got:a.-1.' + assert out.strip() == 'got:a.%d.' % tell assert 'toplevel' in err assert 'mycat' not in err assert 'foo 2 bar 3' not in err @@ -345,7 +350,7 @@ assert 'bok' not in err # check with PYPYLOG=:- (means print to stderr) out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': ':-'}) - assert out.strip() == 'got:bcda.-1.' + assert out.strip() == 'got:bcda.%d.' % tell assert 'toplevel' in err assert '{mycat' in err assert 'mycat}' in err @@ -374,20 +379,24 @@ assert 'bok' in data # check with PYPYLOG=somefilename path = udir.join('test_debug_xxx_prof.log') - out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': str(path)}) - size = os.stat(str(path)).st_size - assert out.strip() == 'got:a.' + str(size) + '.' 
- assert not err - assert path.check(file=1) - data = path.read() - assert 'toplevel' in data - assert '{mycat' in data - assert 'mycat}' in data - assert 'foo 2 bar 3' not in data - assert '{cat2' in data - assert 'cat2}' in data - assert 'baz' not in data - assert 'bok' not in data + if str(path).find(':')>=0: + # bad choice of udir, there is a ':' in it which messes up the test + pass + else: + out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': str(path)}) + size = os.stat(str(path)).st_size + assert out.strip() == 'got:a.' + str(size) + '.' + assert not err + assert path.check(file=1) + data = path.read() + assert 'toplevel' in data + assert '{mycat' in data + assert 'mycat}' in data + assert 'foo 2 bar 3' not in data + assert '{cat2' in data + assert 'cat2}' in data + assert 'baz' not in data + assert 'bok' not in data # check with PYPYLOG=myc:somefilename (includes mycat but not cat2) path = udir.join('test_debug_xxx_myc.log') out, err = cbuilder.cmdexec("", err=True, From noreply at buildbot.pypy.org Wed May 14 22:07:51 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 May 2014 22:07:51 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20140514200751.765C91C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71518:3920cce43114 Date: 2014-05-14 22:45 +0300 http://bitbucket.org/pypy/pypy/changeset/3920cce43114/ Log: typo diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -298,7 +298,7 @@ return os.getcwd() f1 = compile(does_stuff, [str]) if os.name == 'nt': - assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + assert f1(os.environ['TEMP']) == os.path.realpath(os.environ['TEMP']) else: assert f1('/tmp') == os.path.realpath('/tmp') From noreply at buildbot.pypy.org Wed May 14 22:07:52 2014 From: noreply at buildbot.pypy.org (mattip) 
Date: Wed, 14 May 2014 22:07:52 +0200 (CEST) Subject: [pypy-commit] pypy default: make test compatible with changeset 5494b1aac76f Message-ID: <20140514200752.99F041C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71519:28b3e7b802de Date: 2014-05-14 23:04 +0300 http://bitbucket.org/pypy/pypy/changeset/28b3e7b802de/ Log: make test compatible with changeset 5494b1aac76f diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -1,5 +1,5 @@ import py -import os, time, sys +import os, time, sys, genericpath from rpython.tool.udir import udir from rpython.rlib.rarithmetic import r_longlong from rpython.annotator import model as annmodel @@ -243,14 +243,16 @@ assert f() == False def test_os_path_isdir(): + # os.path.isdir is not rpython once pywin is installed (win32 specific) + # genericpath.isdir is better. directory = "./." def fn(): - return os.path.isdir(directory) + return genericpath.isdir(directory) f = compile(fn, []) assert f() == True directory = "some/random/name" def fn(): - return os.path.isdir(directory) + return genericpath.isdir(directory) f = compile(fn, []) assert f() == False From noreply at buildbot.pypy.org Wed May 14 22:45:55 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 May 2014 22:45:55 +0200 (CEST) Subject: [pypy-commit] pypy default: make the utf_8_decode slightly more reusable in different contexts, e.g. when we want to calculate the size Message-ID: <20140514204555.AAEC71C02D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71520:af21a0ec95a5 Date: 2014-05-14 22:13 +0200 http://bitbucket.org/pypy/pypy/changeset/af21a0ec95a5/ Log: make the utf_8_decode slightly more reusable in different contexts, e.g. 
when we want to calculate the size diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -125,15 +125,18 @@ errorhandler=None, allow_surrogates=False): if errorhandler is None: errorhandler = default_unicode_error_decode - return str_decode_utf_8_impl(s, size, errors, final, errorhandler, - allow_surrogates=allow_surrogates) + result = UnicodeBuilder(size) + pos = str_decode_utf_8_impl(s, size, errors, final, errorhandler, + allow_surrogates=allow_surrogates, + result=result) + return result.build(), pos + at specialize.argtype(6) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, - allow_surrogates): + allow_surrogates, result): if size == 0: - return u'', 0 + return 0 - result = UnicodeBuilder(size) pos = 0 while pos < size: ordch1 = ord(s[pos]) @@ -291,7 +294,7 @@ result.append(unichr(0xDC00 + (c & 0x03FF))) pos += 4 - return result.build(), pos + return pos def _encodeUCS4(result, ch): # Encode UCS4 Unicode ordinals From noreply at buildbot.pypy.org Wed May 14 22:45:57 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 May 2014 22:45:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140514204557.58CF91C02D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71521:1a46ed72e30d Date: 2014-05-14 22:45 +0200 http://bitbucket.org/pypy/pypy/changeset/1a46ed72e30d/ Log: merge diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -56,6 +56,9 @@ for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) for key, text in sections: + # py.io.StdCaptureFD may send in unicode + if isinstance(text, unicode): + text = text.encode('utf-8') py.builtin.print_(" ", file=self.logfile) py.builtin.print_(" -------------------- %s --------------------" % key.rstrip(), file=self.logfile) diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- 
a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -100,6 +100,8 @@ ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. + Note that using a : in fname is a bad idea, Windows + users, beware. ``:``\ *fname* Full logging, including ``debug_print``. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -671,8 +671,9 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - irc_topic = sys.version_info[3] != 'final' or ( - readenv and os.getenv('PYPY_IRC_TOPIC')) + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' or ( + readenv and os.getenv('PYPY_IRC_TOPIC')) success = run_toplevel(interactive_console, mainmodule, quiet=not irc_topic) except SystemExit, e: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,12 +7,8 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir -from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = PYPY_VERSION[3] == "final" - - banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -246,10 +242,6 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt - if is_release: - assert irc_header not in child.before - else: - assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') @@ -269,11 +261,22 @@ child.sendline("'' in sys.path") child.expect("True") - def test_irc_topic(self, monkeypatch): + def test_yes_irc_topic(self, monkeypatch): monkeypatch.setenv('PYPY_IRC_TOPIC', '1') child = 
self.spawn([]) child.expect(irc_header) # banner + def test_maybe_irc_topic(self): + import sys + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' + child = self.spawn([]) + child.expect('>>>') # banner + if irc_topic: + assert irc_header in child.before + else: + assert irc_header not in child.before + def test_help(self): # test that -h prints the usage, including the name of the executable # which should be /full/path/to/app_main.py in this case @@ -934,6 +937,7 @@ # ---------------------------------------- from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION cpy_ver = '%d.%d' % CPYTHON_VERSION[:2] + from lib_pypy._pypy_interact import irc_header goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and @@ -953,6 +957,7 @@ self.w_fake_exe = self.space.wrap(str(fake_exe)) self.w_expected_path = self.space.wrap(expected_path) self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final") self.w_tmp_dir = self.space.wrap(tmp_dir) @@ -1022,3 +1027,4 @@ # assert it did not crash finally: sys.path[:] = old_sys_path + diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1,7 +1,7 @@ import py from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import SomePtr -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr from rpython.rtyper.lltypesystem.llmemory import itemoffsetof diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -9,11 +9,11 @@ from 
rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import ll_str, llmemory -from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) -from rpython.rtyper.rmodel import inputconst, Repr, IntegerRepr +from rpython.rtyper.rmodel import inputconst, Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import (AbstractStringRepr, AbstractCharRepr, AbstractUniCharRepr, AbstractStringIteratorRepr, AbstractLLHelpers, AbstractUnicodeRepr) diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -11,7 +11,8 @@ from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.error import TyperError from rpython.rtyper.module import ll_os_stat diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.llmemory import (NULL, Address, cast_adr_to_int, fakeaddress, sizeof) -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rptr import PtrRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rbool.py b/rpython/rtyper/rbool.py --- a/rpython/rtyper/rbool.py +++ b/rpython/rtyper/rbool.py @@ -1,21 +1,18 @@ from rpython.annotator import model as annmodel from 
rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Unsigned, Bool, Float -from rpython.rtyper.rmodel import IntegerRepr, BoolRepr, log +from rpython.rtyper.rmodel import log +from rpython.rtyper.rint import IntegerRepr +from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomeBool): - def rtyper_makerepr(self, rtyper): - return bool_repr - - def rtyper_makekey(self): - return self.__class__, - -bool_repr = BoolRepr() - - -class __extend__(BoolRepr): +class BoolRepr(IntegerRepr): + lowleveltype = Bool + # NB. no 'opprefix' here. Use 'as_int' systematically. + def __init__(self): + from rpython.rtyper.rint import signed_repr + self.as_int = signed_repr def convert_const(self, value): if not isinstance(value, bool): @@ -23,7 +20,7 @@ return value def rtype_bool(_, hop): - vlist = hop.inputargs(Bool) + vlist = hop.inputargs(bool_repr) return vlist[0] def rtype_int(_, hop): @@ -36,9 +33,33 @@ hop.exception_cannot_occur() return vlist[0] +bool_repr = BoolRepr() + + +class __extend__(annmodel.SomeBool): + def rtyper_makerepr(self, rtyper): + return bool_repr + + def rtyper_makekey(self): + return self.__class__, + # # _________________________ Conversions _________________________ +class __extend__(pairtype(BoolRepr, FloatRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Bool and r_to.lowleveltype == Float: + log.debug('explicit cast_bool_to_float') + return llops.genop('cast_bool_to_float', [v], resulttype=Float) + return NotImplemented + +class __extend__(pairtype(FloatRepr, BoolRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Float and r_to.lowleveltype == Bool: + log.debug('explicit cast_float_to_bool') + return llops.genop('float_is_true', [v], resulttype=Bool) + return NotImplemented + class __extend__(pairtype(BoolRepr, IntegerRepr)): def convert_from_to((r_from, r_to), v, llops): if 
r_from.lowleveltype == Bool and r_to.lowleveltype == Unsigned: diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -54,7 +54,6 @@ from rpython.rtyper.rtuple import TupleRepr if arguments.w_stararg != hop.nb_args - 3: raise TyperError("call pattern too complex") - hop.nb_args -= 1 v_tuple = hop.args_v.pop() s_tuple = hop.args_s.pop() r_tuple = hop.args_r.pop() @@ -62,7 +61,6 @@ raise TyperError("*arg must be a tuple") for i in range(len(r_tuple.items_r)): v_item = r_tuple.getitem_internal(hop.llops, v_tuple, i) - hop.nb_args += 1 hop.args_v.append(v_item) hop.args_s.append(s_tuple.items[i]) hop.args_r.append(r_tuple.items_r[i]) @@ -177,7 +175,7 @@ result.append(hop.inputarg(r, arg=i)) else: result.append(None) - hop.nb_args -= len(lst) + del hop.args_v[hop.nb_args - len(lst):] return result def get_builtin_method_self(x): @@ -367,8 +365,7 @@ (i_zero, None), (i_track_allocation, None), (i_add_memory_pressure, None)) - (v_flavor, v_zero, v_track_allocation, - v_add_memory_pressure) = kwds_v + (v_flavor, v_zero, v_track_allocation, v_add_memory_pressure) = kwds_v flags = {'flavor': 'gc'} if v_flavor is not None: flags['flavor'] = v_flavor.value diff --git a/rpython/rtyper/rbytearray.py b/rpython/rtyper/rbytearray.py --- a/rpython/rtyper/rbytearray.py +++ b/rpython/rtyper/rbytearray.py @@ -1,6 +1,6 @@ from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import IntegerRepr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import AbstractStringRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -1,16 +1,65 @@ from rpython.annotator import model as annmodel from rpython.rlib.objectmodel import _hash_float from rpython.rlib.rarithmetic import base_int -from rpython.rlib.rfloat 
import formatd from rpython.rlib import jit from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, - SignedLongLong, UnsignedLongLong, Bool, Float) -from rpython.rtyper.rmodel import FloatRepr, IntegerRepr, BoolRepr, log -from rpython.rtyper.rstr import AbstractStringRepr +from rpython.rtyper.lltypesystem.lltype import (Signed, Bool, Float) +from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype +class FloatRepr(Repr): + lowleveltype = Float + + def convert_const(self, value): + if not isinstance(value, (int, base_int, float)): # can be bool too + raise TyperError("not a float: %r" % (value,)) + return float(value) + + def get_ll_eq_function(self): + return None + get_ll_gt_function = get_ll_eq_function + get_ll_lt_function = get_ll_eq_function + get_ll_ge_function = get_ll_eq_function + get_ll_le_function = get_ll_eq_function + + def get_ll_hash_function(self): + return _hash_float + + def rtype_bool(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_is_true', vlist, resulttype=Bool) + + def rtype_neg(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_neg', vlist, resulttype=Float) + + def rtype_pos(_, hop): + vlist = hop.inputargs(Float) + return vlist[0] + + def rtype_abs(_, hop): + vlist = hop.inputargs(Float) + return hop.genop('float_abs', vlist, resulttype=Float) + + def rtype_int(_, hop): + vlist = hop.inputargs(Float) + # int(x) never raises in RPython, you need to use + # rarithmetic.ovfcheck_float_to_int() if you want this + hop.exception_cannot_occur() + return hop.genop('cast_float_to_int', vlist, resulttype=Signed) + + def rtype_float(_, hop): + vlist = hop.inputargs(Float) + hop.exception_cannot_occur() + return vlist[0] + + @jit.elidable + def ll_str(self, f): + from rpython.rlib.rfloat import formatd + return llstr(formatd(f, 'f', 6)) + +float_repr = FloatRepr() class 
__extend__(annmodel.SomeFloat): def rtyper_makerepr(self, rtyper): @@ -20,9 +69,6 @@ return self.__class__, -float_repr = FloatRepr() - - class __extend__(pairtype(FloatRepr, FloatRepr)): #Arithmetic @@ -75,11 +121,6 @@ def rtype_ge(_, hop): return _rtype_compare_template(hop, 'ge') -class __extend__(pairtype(AbstractStringRepr, FloatRepr)): - def rtype_mod(_, hop): - from rpython.rtyper.lltypesystem.rstr import do_stringformat - return do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) - #Helpers FloatRepr,FloatRepr def _rtype_template(hop, func): @@ -91,104 +132,6 @@ return hop.genop('float_'+func, vlist, resulttype=Bool) -class __extend__(FloatRepr): - - def convert_const(self, value): - if not isinstance(value, (int, base_int, float)): # can be bool too - raise TyperError("not a float: %r" % (value,)) - return float(value) - - def get_ll_eq_function(self): - return None - get_ll_gt_function = get_ll_eq_function - get_ll_lt_function = get_ll_eq_function - get_ll_ge_function = get_ll_eq_function - get_ll_le_function = get_ll_eq_function - - def get_ll_hash_function(self): - return _hash_float - - def rtype_bool(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_is_true', vlist, resulttype=Bool) - - def rtype_neg(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_neg', vlist, resulttype=Float) - - def rtype_pos(_, hop): - vlist = hop.inputargs(Float) - return vlist[0] - - def rtype_abs(_, hop): - vlist = hop.inputargs(Float) - return hop.genop('float_abs', vlist, resulttype=Float) - - def rtype_int(_, hop): - vlist = hop.inputargs(Float) - # int(x) never raises in RPython, you need to use - # rarithmetic.ovfcheck_float_to_int() if you want this - hop.exception_cannot_occur() - return hop.genop('cast_float_to_int', vlist, resulttype=Signed) - - def rtype_float(_, hop): - vlist = hop.inputargs(Float) - hop.exception_cannot_occur() - return vlist[0] - - @jit.elidable - def ll_str(self, f): - return llstr(formatd(f, 'f', 6)) - -# -# 
_________________________ Conversions _________________________ - -class __extend__(pairtype(IntegerRepr, FloatRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Float: - log.debug('explicit cast_uint_to_float') - return llops.genop('cast_uint_to_float', [v], resulttype=Float) - if r_from.lowleveltype == Signed and r_to.lowleveltype == Float: - log.debug('explicit cast_int_to_float') - return llops.genop('cast_int_to_float', [v], resulttype=Float) - if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float: - log.debug('explicit cast_longlong_to_float') - return llops.genop('cast_longlong_to_float', [v], resulttype=Float) - if r_from.lowleveltype == UnsignedLongLong and r_to.lowleveltype == Float: - log.debug('explicit cast_ulonglong_to_float') - return llops.genop('cast_ulonglong_to_float', [v], resulttype=Float) - return NotImplemented - -class __extend__(pairtype(FloatRepr, IntegerRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Float and r_to.lowleveltype == Unsigned: - log.debug('explicit cast_float_to_uint') - return llops.genop('cast_float_to_uint', [v], resulttype=Unsigned) - if r_from.lowleveltype == Float and r_to.lowleveltype == Signed: - log.debug('explicit cast_float_to_int') - return llops.genop('cast_float_to_int', [v], resulttype=Signed) - if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong: - log.debug('explicit cast_float_to_longlong') - return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong) - if r_from.lowleveltype == Float and r_to.lowleveltype == UnsignedLongLong: - log.debug('explicit cast_float_to_ulonglong') - return llops.genop('cast_float_to_ulonglong', [v], resulttype=UnsignedLongLong) - return NotImplemented - -class __extend__(pairtype(BoolRepr, FloatRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Bool and r_to.lowleveltype == Float: - 
log.debug('explicit cast_bool_to_float') - return llops.genop('cast_bool_to_float', [v], resulttype=Float) - return NotImplemented - -class __extend__(pairtype(FloatRepr, BoolRepr)): - def convert_from_to((r_from, r_to), v, llops): - if r_from.lowleveltype == Float and r_to.lowleveltype == Bool: - log.debug('explicit cast_float_to_bool') - return llops.genop('float_is_true', [v], resulttype=Bool) - return NotImplemented - # ______________________________________________________________________ # Support for r_singlefloat and r_longfloat from rpython.rlib.rarithmetic diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -8,9 +8,166 @@ from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, Char, UniChar, UnsignedLongLong, SignedLongLong, build_number, Number, cast_primitive, typeOf, SignedLongLongLong) -from rpython.rtyper.rmodel import IntegerRepr, inputconst, log +from rpython.rtyper.rfloat import FloatRepr +from rpython.rtyper.rmodel import inputconst, log from rpython.tool.pairtype import pairtype +class IntegerRepr(FloatRepr): + def __init__(self, lowleveltype, opprefix): + self.lowleveltype = lowleveltype + self._opprefix = opprefix + self.as_int = self + + @property + def opprefix(self): + if self._opprefix is None: + raise TyperError("arithmetic not supported on %r, its size is too small" % + self.lowleveltype) + return self._opprefix + + def convert_const(self, value): + if isinstance(value, objectmodel.Symbolic): + return value + T = typeOf(value) + if isinstance(T, Number) or T is Bool: + return cast_primitive(self.lowleveltype, value) + raise TyperError("not an integer: %r" % (value,)) + + def get_ll_eq_function(self): + if getattr(self, '_opprefix', '?') is None: + return ll_eq_shortint + return None + + def get_ll_ge_function(self): + return None + get_ll_gt_function = get_ll_ge_function + get_ll_lt_function = get_ll_ge_function + get_ll_le_function = 
get_ll_ge_function + + def get_ll_hash_function(self): + if (sys.maxint == 2147483647 and + self.lowleveltype in (SignedLongLong, UnsignedLongLong)): + return ll_hash_long_long + return ll_hash_int + + get_ll_fasthash_function = get_ll_hash_function + + def get_ll_dummyval_obj(self, rtyper, s_value): + # if >= 0, then all negative values are special + if s_value.nonneg and self.lowleveltype is Signed: + return signed_repr # whose ll_dummy_value is -1 + else: + return None + + ll_dummy_value = -1 + + def rtype_chr(_, hop): + vlist = hop.inputargs(Signed) + if hop.has_implicit_exception(ValueError): + hop.exception_is_here() + hop.gendirectcall(ll_check_chr, vlist[0]) + else: + hop.exception_cannot_occur() + return hop.genop('cast_int_to_char', vlist, resulttype=Char) + + def rtype_unichr(_, hop): + vlist = hop.inputargs(Signed) + if hop.has_implicit_exception(ValueError): + hop.exception_is_here() + hop.gendirectcall(ll_check_unichr, vlist[0]) + else: + hop.exception_cannot_occur() + return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar) + + def rtype_bool(self, hop): + assert self is self.as_int # rtype_is_true() is overridden in BoolRepr + vlist = hop.inputargs(self) + return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool) + + #Unary arithmetic operations + + def rtype_abs(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + if hop.s_result.unsigned: + return vlist[0] + else: + return hop.genop(self.opprefix + 'abs', vlist, resulttype=self) + + def rtype_abs_ovf(self, hop): + self = self.as_int + if hop.s_result.unsigned: + raise TyperError("forbidden uint_abs_ovf") + else: + vlist = hop.inputargs(self) + hop.has_implicit_exception(OverflowError) # record we know about it + hop.exception_is_here() + return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self) + + def rtype_invert(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + return hop.genop(self.opprefix + 'invert', vlist, resulttype=self) + + def 
rtype_neg(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + if hop.s_result.unsigned: + # implement '-r_uint(x)' with unsigned subtraction '0 - x' + zero = self.lowleveltype._defl() + vlist.insert(0, hop.inputconst(self.lowleveltype, zero)) + return hop.genop(self.opprefix + 'sub', vlist, resulttype=self) + else: + return hop.genop(self.opprefix + 'neg', vlist, resulttype=self) + + def rtype_neg_ovf(self, hop): + self = self.as_int + if hop.s_result.unsigned: + # this is supported (and turns into just 0-x) for rbigint.py + hop.exception_cannot_occur() + return self.rtype_neg(hop) + else: + vlist = hop.inputargs(self) + hop.has_implicit_exception(OverflowError) # record we know about it + hop.exception_is_here() + return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self) + + def rtype_pos(self, hop): + self = self.as_int + vlist = hop.inputargs(self) + return vlist[0] + + def rtype_int(self, hop): + if self.lowleveltype in (Unsigned, UnsignedLongLong): + raise TyperError("use intmask() instead of int(r_uint(...))") + vlist = hop.inputargs(Signed) + hop.exception_cannot_occur() + return vlist[0] + + def rtype_float(_, hop): + vlist = hop.inputargs(Float) + hop.exception_cannot_occur() + return vlist[0] + + @jit.elidable + def ll_str(self, i): + from rpython.rtyper.lltypesystem.ll_str import ll_int2dec + return ll_int2dec(i) + + def rtype_hex(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2hex + self = self.as_int + varg = hop.inputarg(self, 0) + true = inputconst(Bool, True) + return hop.gendirectcall(ll_int2hex, varg, true) + + def rtype_oct(self, hop): + from rpython.rtyper.lltypesystem.ll_str import ll_int2oct + self = self.as_int + varg = hop.inputarg(self, 0) + true = inputconst(Bool, True) + return hop.gendirectcall(ll_int2oct, varg, true) + _integer_reprs = {} def getintegerrepr(lltype, prefix=None): @@ -235,156 +392,11 @@ repr = hop.rtyper.getrepr(annmodel.unionof(s_int1, s_int2)).as_int vlist = hop.inputargs(repr, 
repr) hop.exception_is_here() - return hop.genop(repr.opprefix+func, vlist, resulttype=Bool) + return hop.genop(repr.opprefix + func, vlist, resulttype=Bool) # -class __extend__(IntegerRepr): - - def convert_const(self, value): - if isinstance(value, objectmodel.Symbolic): - return value - T = typeOf(value) - if isinstance(T, Number) or T is Bool: - return cast_primitive(self.lowleveltype, value) - raise TyperError("not an integer: %r" % (value,)) - - def get_ll_eq_function(self): - if getattr(self, '_opprefix', '?') is None: - return ll_eq_shortint - return None - - def get_ll_ge_function(self): - return None - get_ll_gt_function = get_ll_ge_function - get_ll_lt_function = get_ll_ge_function - get_ll_le_function = get_ll_ge_function - - def get_ll_hash_function(self): - if (sys.maxint == 2147483647 and - self.lowleveltype in (SignedLongLong, UnsignedLongLong)): - return ll_hash_long_long - return ll_hash_int - - get_ll_fasthash_function = get_ll_hash_function - - def get_ll_dummyval_obj(self, rtyper, s_value): - # if >= 0, then all negative values are special - if s_value.nonneg and self.lowleveltype is Signed: - return signed_repr # whose ll_dummy_value is -1 - else: - return None - - ll_dummy_value = -1 - - def rtype_chr(_, hop): - vlist = hop.inputargs(Signed) - if hop.has_implicit_exception(ValueError): - hop.exception_is_here() - hop.gendirectcall(ll_check_chr, vlist[0]) - else: - hop.exception_cannot_occur() - return hop.genop('cast_int_to_char', vlist, resulttype=Char) - - def rtype_unichr(_, hop): - vlist = hop.inputargs(Signed) - if hop.has_implicit_exception(ValueError): - hop.exception_is_here() - hop.gendirectcall(ll_check_unichr, vlist[0]) - else: - hop.exception_cannot_occur() - return hop.genop('cast_int_to_unichar', vlist, resulttype=UniChar) - - def rtype_bool(self, hop): - assert self is self.as_int # rtype_is_true() is overridden in BoolRepr - vlist = hop.inputargs(self) - return hop.genop(self.opprefix + 'is_true', vlist, resulttype=Bool) - - 
#Unary arithmetic operations - - def rtype_abs(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - if hop.s_result.unsigned: - return vlist[0] - else: - return hop.genop(self.opprefix + 'abs', vlist, resulttype=self) - - def rtype_abs_ovf(self, hop): - self = self.as_int - if hop.s_result.unsigned: - raise TyperError("forbidden uint_abs_ovf") - else: - vlist = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self) - - def rtype_invert(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - return hop.genop(self.opprefix + 'invert', vlist, resulttype=self) - - def rtype_neg(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - if hop.s_result.unsigned: - # implement '-r_uint(x)' with unsigned subtraction '0 - x' - zero = self.lowleveltype._defl() - vlist.insert(0, hop.inputconst(self.lowleveltype, zero)) - return hop.genop(self.opprefix + 'sub', vlist, resulttype=self) - else: - return hop.genop(self.opprefix + 'neg', vlist, resulttype=self) - - def rtype_neg_ovf(self, hop): - self = self.as_int - if hop.s_result.unsigned: - # this is supported (and turns into just 0-x) for rbigint.py - hop.exception_cannot_occur() - return self.rtype_neg(hop) - else: - vlist = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self) - - def rtype_pos(self, hop): - self = self.as_int - vlist = hop.inputargs(self) - return vlist[0] - - def rtype_int(self, hop): - if self.lowleveltype in (Unsigned, UnsignedLongLong): - raise TyperError("use intmask() instead of int(r_uint(...))") - vlist = hop.inputargs(Signed) - hop.exception_cannot_occur() - return vlist[0] - - def rtype_float(_, hop): - vlist = hop.inputargs(Float) - hop.exception_cannot_occur() - return vlist[0] - - @jit.elidable - def 
ll_str(self, i): - from rpython.rtyper.lltypesystem.ll_str import ll_int2dec - return ll_int2dec(i) - - def rtype_hex(self, hop): - from rpython.rtyper.lltypesystem.ll_str import ll_int2hex - self = self.as_int - varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return hop.gendirectcall(ll_int2hex, varg, true) - - def rtype_oct(self, hop): - from rpython.rtyper.lltypesystem.ll_str import ll_int2oct - self = self.as_int - varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return hop.gendirectcall(ll_int2oct, varg, true) - def ll_hash_int(n): return intmask(n) @@ -407,3 +419,38 @@ return else: raise ValueError + +# +# _________________________ Conversions _________________________ + +class __extend__(pairtype(IntegerRepr, FloatRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Float: + log.debug('explicit cast_uint_to_float') + return llops.genop('cast_uint_to_float', [v], resulttype=Float) + if r_from.lowleveltype == Signed and r_to.lowleveltype == Float: + log.debug('explicit cast_int_to_float') + return llops.genop('cast_int_to_float', [v], resulttype=Float) + if r_from.lowleveltype == SignedLongLong and r_to.lowleveltype == Float: + log.debug('explicit cast_longlong_to_float') + return llops.genop('cast_longlong_to_float', [v], resulttype=Float) + if r_from.lowleveltype == UnsignedLongLong and r_to.lowleveltype == Float: + log.debug('explicit cast_ulonglong_to_float') + return llops.genop('cast_ulonglong_to_float', [v], resulttype=Float) + return NotImplemented + +class __extend__(pairtype(FloatRepr, IntegerRepr)): + def convert_from_to((r_from, r_to), v, llops): + if r_from.lowleveltype == Float and r_to.lowleveltype == Unsigned: + log.debug('explicit cast_float_to_uint') + return llops.genop('cast_float_to_uint', [v], resulttype=Unsigned) + if r_from.lowleveltype == Float and r_to.lowleveltype == Signed: + log.debug('explicit cast_float_to_int') + return 
llops.genop('cast_float_to_int', [v], resulttype=Signed) + if r_from.lowleveltype == Float and r_to.lowleveltype == SignedLongLong: + log.debug('explicit cast_float_to_longlong') + return llops.genop('cast_float_to_longlong', [v], resulttype=SignedLongLong) + if r_from.lowleveltype == Float and r_to.lowleveltype == UnsignedLongLong: + log.debug('explicit cast_float_to_ulonglong') + return llops.genop('cast_float_to_ulonglong', [v], resulttype=UnsignedLongLong) + return NotImplemented diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -9,7 +9,8 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import typeOf, Ptr, Void, Signed, Bool from rpython.rtyper.lltypesystem.lltype import nullptr, Char, UniChar, Number -from rpython.rtyper.rmodel import Repr, IteratorRepr, IntegerRepr +from rpython.rtyper.rmodel import Repr, IteratorRepr +from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import AbstractStringRepr, AbstractCharRepr from rpython.tool.pairtype import pairtype, pair diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (Void, Bool, Float, typeOf, +from rpython.rtyper.lltypesystem.lltype import (Void, Bool, typeOf, LowLevelType, isCompatibleType) from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -322,32 +322,6 @@ return NotImplemented # ____________________________________________________________ -# Primitive Repr classes, in the same hierarchical order as -# the corresponding SomeObjects - -class FloatRepr(Repr): - lowleveltype = Float - -class IntegerRepr(FloatRepr): - def __init__(self, lowleveltype, opprefix): - 
self.lowleveltype = lowleveltype - self._opprefix = opprefix - self.as_int = self - - def _get_opprefix(self): - if self._opprefix is None: - raise TyperError("arithmetic not supported on %r, its size is too small" % - self.lowleveltype) - return self._opprefix - - opprefix = property(_get_opprefix) - -class BoolRepr(IntegerRepr): - lowleveltype = Bool - # NB. no 'opprefix' here. Use 'as_int' systematically. - def __init__(self): - from rpython.rtyper.rint import signed_repr - self.as_int = signed_repr class VoidRepr(Repr): lowleveltype = Void diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -5,7 +5,8 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import Repr, IntegerRepr +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -2,7 +2,8 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr from rpython.rtyper.rlist import dum_nocheck, dum_checkidx -from rpython.rtyper.rmodel import Repr, IntegerRepr, IteratorRepr +from rpython.rtyper.rmodel import Repr, IteratorRepr +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -3,7 +3,9 @@ from rpython.rtyper import rint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar -from rpython.rtyper.rmodel import IntegerRepr, IteratorRepr, inputconst, Repr +from rpython.rtyper.rmodel import IteratorRepr, inputconst, Repr +from rpython.rtyper.rint import IntegerRepr 
+from rpython.rtyper.rfloat import FloatRepr from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods @@ -35,91 +37,6 @@ startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) - -class AbstractCharRepr(AbstractStringRepr): - def rtype_method_lower(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_lower_char, v_chr) - - def rtype_method_upper(self, hop): - char_repr = hop.args_r[0].char_repr - v_chr, = hop.inputargs(char_repr) - hop.exception_cannot_occur() - return hop.gendirectcall(self.ll.ll_upper_char, v_chr) - - -class AbstractUniCharRepr(AbstractStringRepr): - pass - -class AbstractUnicodeRepr(AbstractStringRepr): - - def __init__(self, *args): - AbstractStringRepr.__init__(self, *args) - self.runicode_encode_utf_8 = None - - def ensure_ll_encode_utf8(self): - from rpython.rlib.runicode import unicode_encode_utf_8_impl - self.runicode_encode_utf_8 = func_with_new_name( - unicode_encode_utf_8_impl, 'runicode_encode_utf_8') - - def rtype_method_upper(self, hop): - raise TypeError("Cannot do toupper on unicode string") - - def rtype_method_lower(self, hop): - raise TypeError("Cannot do tolower on unicode string") - - @jit.elidable - def ll_encode_utf8(self, ll_s): - from rpython.rtyper.annlowlevel import hlunicode - s = hlunicode(ll_s) - assert s is not None - bytes = self.runicode_encode_utf_8( - s, len(s), 'strict', - errorhandler=self.ll_raise_unicode_exception_encode, - allow_surrogates=False) - return self.ll.llstr(bytes) - - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, - startingpos, endingpos): - raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) - -class __extend__(annmodel.SomeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import 
string_repr - return string_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeString): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unicode_repr - return unicode_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeChar): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import char_repr - return char_repr - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(annmodel.SomeUnicodeCodePoint): - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rstr import unichar_repr - return unichar_repr - - def rtyper_makekey(self): - return self.__class__, - - -class __extend__(AbstractStringRepr): - def _str_reprs(self, hop): return hop.args_r[0].repr, hop.args_r[1].repr @@ -405,7 +322,7 @@ return hop.inputconst(hop.r_result, hop.s_result.const) repr = hop.args_r[0].repr v_str = hop.inputarg(repr, 0) - if repr == hop.r_result: # the argument is a unicode string already + if repr == hop.r_result: # the argument is a unicode string already hop.exception_cannot_occur() return v_str hop.exception_is_here() @@ -445,7 +362,46 @@ else: return self.ll.ll_constant('None') -class __extend__(AbstractUnicodeRepr): + def rtype_getslice(r_str, hop): + string_repr = r_str.repr + v_str = hop.inputarg(string_repr, arg=0) + kind, vlist = hop.decompose_slice_args() + ll_fn = getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) + return hop.gendirectcall(ll_fn, v_str, *vlist) + + +class AbstractUnicodeRepr(AbstractStringRepr): + + def __init__(self, *args): + AbstractStringRepr.__init__(self, *args) + self.runicode_encode_utf_8 = None + + def ensure_ll_encode_utf8(self): + from rpython.rlib.runicode import unicode_encode_utf_8_impl + self.runicode_encode_utf_8 = func_with_new_name( + unicode_encode_utf_8_impl, 'runicode_encode_utf_8') + + def rtype_method_upper(self, hop): + raise TypeError("Cannot do toupper 
on unicode string") + + def rtype_method_lower(self, hop): + raise TypeError("Cannot do tolower on unicode string") + + @jit.elidable + def ll_encode_utf8(self, ll_s): + from rpython.rtyper.annlowlevel import hlunicode + s = hlunicode(ll_s) + assert s is not None + bytes = self.runicode_encode_utf_8( + s, len(s), 'strict', + errorhandler=self.ll_raise_unicode_exception_encode, + allow_surrogates=False) + return self.ll.llstr(bytes) + + def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + startingpos, endingpos): + raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) + def rtype_method_encode(self, hop): if not hop.args_s[1].is_constant(): raise TyperError("encoding must be constant") @@ -466,6 +422,117 @@ else: raise TyperError("encoding %s not implemented" % (encoding, )) +class BaseCharReprMixin(object): + + def convert_const(self, value): + if not isinstance(value, str) or len(value) != 1: + raise TyperError("not a character: %r" % (value,)) + return value + + def get_ll_eq_function(self): + return None + + def get_ll_hash_function(self): + return self.ll.ll_char_hash + + get_ll_fasthash_function = get_ll_hash_function + + def rtype_len(_, hop): + return hop.inputconst(Signed, 1) + + def rtype_bool(_, hop): + assert not hop.args_s[0].can_be_None + return hop.inputconst(Bool, True) + + def rtype_ord(_, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + return hop.genop('cast_char_to_int', vlist, resulttype=Signed) + + def _rtype_method_isxxx(_, llfn, hop): + repr = hop.args_r[0].char_repr + vlist = hop.inputargs(repr) + hop.exception_cannot_occur() + return hop.gendirectcall(llfn, vlist[0]) + + def rtype_method_isspace(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) + + def rtype_method_isdigit(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) + + def rtype_method_isalpha(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) + + def 
rtype_method_isalnum(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) + + def rtype_method_isupper(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) + + def rtype_method_islower(self, hop): + return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) + + +class AbstractCharRepr(BaseCharReprMixin, AbstractStringRepr): + def rtype_method_lower(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_lower_char, v_chr) + + def rtype_method_upper(self, hop): + char_repr = hop.args_r[0].char_repr + v_chr, = hop.inputargs(char_repr) + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll.ll_upper_char, v_chr) + + def ll_str(self, ch): + return self.ll.ll_chr2str(ch) + + +class AbstractUniCharRepr(BaseCharReprMixin, AbstractStringRepr): + + def ll_str(self, ch): + # xxx suboptimal, maybe + return str(unicode(ch)) + + def ll_unicode(self, ch): + return unicode(ch) + + +class __extend__(annmodel.SomeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import string_repr + return string_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeString): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unicode_repr + return unicode_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeChar): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import char_repr + return char_repr + + def rtyper_makekey(self): + return self.__class__, + +class __extend__(annmodel.SomeUnicodeCodePoint): + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.lltypesystem.rstr import unichar_repr + return unichar_repr + + def rtyper_makekey(self): + return self.__class__, + class __extend__(pairtype(AbstractStringRepr, Repr)): def rtype_mod((r_str, _), hop): @@ 
-474,6 +541,12 @@ return r_str.ll.do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) +class __extend__(pairtype(AbstractStringRepr, FloatRepr)): + def rtype_mod(_, hop): + from rpython.rtyper.lltypesystem.rstr import do_stringformat + return do_stringformat(hop, [(hop.args_v[1], hop.args_r[1])]) + + class __extend__(pairtype(AbstractStringRepr, IntegerRepr)): def rtype_getitem((r_str, r_int), hop, checkidx=False): string_repr = r_str.repr @@ -513,15 +586,6 @@ rtype_inplace_mul = rtype_mul -class __extend__(AbstractStringRepr): - - def rtype_getslice(r_str, hop): - string_repr = r_str.repr - v_str = hop.inputarg(string_repr, arg=0) - kind, vlist = hop.decompose_slice_args() - ll_fn = getattr(r_str.ll, 'll_stringslice_%s' % (kind,)) - return hop.gendirectcall(ll_fn, v_str, *vlist) - class __extend__(pairtype(AbstractStringRepr, AbstractStringRepr)): def rtype_add((r_str1, r_str2), hop): str1_repr = r_str1.repr @@ -585,65 +649,6 @@ return hop.gendirectcall(r_str.ll.ll_contains, v_str, v_chr) -class __extend__(AbstractCharRepr): - def ll_str(self, ch): - return self.ll.ll_chr2str(ch) - -class __extend__(AbstractUniCharRepr): - def ll_str(self, ch): - # xxx suboptimal, maybe - return str(unicode(ch)) - - def ll_unicode(self, ch): - return unicode(ch) - -class __extend__(AbstractCharRepr, - AbstractUniCharRepr): - - def convert_const(self, value): - if not isinstance(value, str) or len(value) != 1: - raise TyperError("not a character: %r" % (value,)) - return value - - def get_ll_eq_function(self): - return None - - def get_ll_hash_function(self): - return self.ll.ll_char_hash - - get_ll_fasthash_function = get_ll_hash_function - - def rtype_len(_, hop): - return hop.inputconst(Signed, 1) - - def rtype_bool(_, hop): - assert not hop.args_s[0].can_be_None - return hop.inputconst(Bool, True) - - def rtype_ord(_, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - return hop.genop('cast_char_to_int', vlist, resulttype=Signed) - - def 
_rtype_method_isxxx(_, llfn, hop): - repr = hop.args_r[0].char_repr - vlist = hop.inputargs(repr) - hop.exception_cannot_occur() - return hop.gendirectcall(llfn, vlist[0]) - - def rtype_method_isspace(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isspace, hop) - def rtype_method_isdigit(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isdigit, hop) - def rtype_method_isalpha(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalpha, hop) - def rtype_method_isalnum(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isalnum, hop) - def rtype_method_isupper(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_isupper, hop) - def rtype_method_islower(self, hop): - return self._rtype_method_isxxx(self.ll.ll_char_islower, hop) - class __extend__(pairtype(AbstractCharRepr, IntegerRepr), pairtype(AbstractUniCharRepr, IntegerRepr)): diff --git a/rpython/rtyper/rtuple.py b/rpython/rtyper/rtuple.py --- a/rpython/rtyper/rtuple.py +++ b/rpython/rtyper/rtuple.py @@ -9,8 +9,9 @@ Void, Signed, Bool, Ptr, GcStruct, malloc, typeOf, nullptr) from rpython.rtyper.lltypesystem.rstr import LLHelpers from rpython.rtyper.rstr import AbstractStringRepr -from rpython.rtyper.rmodel import (Repr, IntegerRepr, inputconst, IteratorRepr, +from rpython.rtyper.rmodel import (Repr, inputconst, IteratorRepr, externalvsinternal) +from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -661,7 +661,6 @@ def setup(self): rtyper = self.rtyper spaceop = self.spaceop - self.nb_args = len(spaceop.args) self.args_v = list(spaceop.args) self.args_s = [rtyper.binding(a) for a in spaceop.args] self.s_result = rtyper.binding(spaceop.result) @@ -669,6 +668,10 @@ self.r_result = rtyper.getrepr(self.s_result) rtyper.call_all_setups() # compute ForwardReferences now + @property + def nb_args(self): 
+ return len(self.args_v) + def copy(self): result = HighLevelOp(self.rtyper, self.spaceop, self.exceptionlinks, self.llops) @@ -726,7 +729,6 @@ def r_s_pop(self, index=-1): "Return and discard the argument with index position." - self.nb_args -= 1 self.args_v.pop(index) return self.args_r.pop(index), self.args_s.pop(index) @@ -739,7 +741,6 @@ self.args_v.insert(0, v_newfirstarg) self.args_r.insert(0, r_newfirstarg) self.args_s.insert(0, s_newfirstarg) - self.nb_args += 1 def swap_fst_snd_args(self): self.args_v[0], self.args_v[1] = self.args_v[1], self.args_v[0] diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,7 +522,7 @@ # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations - 'bextr', + 'andn', 'bextr', 'blsi', 'blsmask', 'blsr', 'tzcnt', 'lzcnt', ]) # a partial list is hopefully good enough for now; it's all to support diff --git a/rpython/translator/c/src/allocator.c b/rpython/translator/c/src/allocator.c --- a/rpython/translator/c/src/allocator.c +++ b/rpython/translator/c/src/allocator.c @@ -24,5 +24,10 @@ # include "src/obmalloc.c" #endif +#elif defined _MSC_VER +/* link will fail without some kind of definition for the functions */ + void *PyObject_Malloc(size_t n) { return NULL; } + void *PyObject_Realloc(void *p, size_t n) { return NULL; } + void PyObject_Free(void *p) { } #endif /* PYPY_STANDALONE */ diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -1,5 +1,5 @@ import py -import os, time, sys +import os, time, sys, genericpath from rpython.tool.udir import udir from rpython.rlib.rarithmetic import r_longlong from rpython.annotator import model as annmodel @@ -243,14 +243,16 @@ assert f() == False def test_os_path_isdir(): + # 
os.path.isdir is not rpython once pywin is installed (win32 specific) + # genericpath.isdir is better. directory = "./." def fn(): - return os.path.isdir(directory) + return genericpath.isdir(directory) f = compile(fn, []) assert f() == True directory = "some/random/name" def fn(): - return os.path.isdir(directory) + return genericpath.isdir(directory) f = compile(fn, []) assert f() == False @@ -298,7 +300,7 @@ return os.getcwd() f1 = compile(does_stuff, [str]) if os.name == 'nt': - assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + assert f1(os.environ['TEMP']) == os.path.realpath(os.environ['TEMP']) else: assert f1('/tmp') == os.path.realpath('/tmp') diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -304,8 +304,13 @@ assert " ll_strtod.o" in makefile def test_debug_print_start_stop(self): + import sys from rpython.rtyper.lltypesystem import rffi - + if sys.platform == 'win32': + # ftell(stderr) is a bit different under subprocess.Popen + tell = 0 + else: + tell = -1 def entry_point(argv): x = "got:" debug_start ("mycat") @@ -327,7 +332,7 @@ t, cbuilder = self.compile(entry_point) # check with PYPYLOG undefined out, err = cbuilder.cmdexec("", err=True, env={}) - assert out.strip() == 'got:a.-1.' + assert out.strip() == 'got:a.%d.' % tell assert 'toplevel' in err assert 'mycat' not in err assert 'foo 2 bar 3' not in err @@ -336,7 +341,7 @@ assert 'bok' not in err # check with PYPYLOG defined to an empty string (same as undefined) out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': ''}) - assert out.strip() == 'got:a.-1.' + assert out.strip() == 'got:a.%d.' 
% tell assert 'toplevel' in err assert 'mycat' not in err assert 'foo 2 bar 3' not in err @@ -345,7 +350,7 @@ assert 'bok' not in err # check with PYPYLOG=:- (means print to stderr) out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': ':-'}) - assert out.strip() == 'got:bcda.-1.' + assert out.strip() == 'got:bcda.%d.' % tell assert 'toplevel' in err assert '{mycat' in err assert 'mycat}' in err @@ -374,20 +379,24 @@ assert 'bok' in data # check with PYPYLOG=somefilename path = udir.join('test_debug_xxx_prof.log') - out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': str(path)}) - size = os.stat(str(path)).st_size - assert out.strip() == 'got:a.' + str(size) + '.' - assert not err - assert path.check(file=1) - data = path.read() - assert 'toplevel' in data - assert '{mycat' in data - assert 'mycat}' in data - assert 'foo 2 bar 3' not in data - assert '{cat2' in data - assert 'cat2}' in data - assert 'baz' not in data - assert 'bok' not in data + if str(path).find(':')>=0: + # bad choice of udir, there is a ':' in it which messes up the test + pass + else: + out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': str(path)}) + size = os.stat(str(path)).st_size + assert out.strip() == 'got:a.' + str(size) + '.' 
+ assert not err + assert path.check(file=1) + data = path.read() + assert 'toplevel' in data + assert '{mycat' in data + assert 'mycat}' in data + assert 'foo 2 bar 3' not in data + assert '{cat2' in data + assert 'cat2}' in data + assert 'baz' not in data + assert 'bok' not in data # check with PYPYLOG=myc:somefilename (includes mycat but not cat2) path = udir.join('test_debug_xxx_myc.log') out, err = cbuilder.cmdexec("", err=True, @@ -1297,7 +1306,10 @@ t, cbuilder = self.compile(entry_point, shared=True, entrypoints=[f]) + ext_suffix = '.so' + if cbuilder.eci.platform.name == 'msvc': + ext_suffix = '.dll' libname = cbuilder.executable_name.join('..', 'lib' + - cbuilder.modulename + '.so') + cbuilder.modulename + ext_suffix) lib = ctypes.CDLL(str(libname)) assert lib.foo(13) == 16 From noreply at buildbot.pypy.org Wed May 14 22:50:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 May 2014 22:50:17 +0200 (CEST) Subject: [pypy-commit] pypy default: ups Message-ID: <20140514205017.4EFCE1C02D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71522:55d012c01e88 Date: 2014-05-14 22:49 +0200 http://bitbucket.org/pypy/pypy/changeset/55d012c01e88/ Log: ups diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -9,6 +9,7 @@ from rpython.tool.pairtype import pairtype, pair from rpython.tool.sourcetools import func_with_new_name from rpython.tool.staticmethods import StaticMethods +from rpython.rlib.rstring import UnicodeBuilder class AbstractStringRepr(Repr): @@ -27,11 +28,12 @@ from rpython.rtyper.annlowlevel import hlstr value = hlstr(llvalue) assert value is not None - univalue, _ = self.rstr_decode_utf_8( + result = UnicodeBuilder(len(value)) + self.rstr_decode_utf_8( value, len(value), 'strict', final=False, errorhandler=self.ll_raise_unicode_exception_decode, - allow_surrogates=False) - return self.ll.llunicode(univalue) + allow_surrogates=False, 
result=result) + return self.ll.llunicode(result.build()) def ll_raise_unicode_exception_decode(self, errors, encoding, msg, s, startingpos, endingpos): From noreply at buildbot.pypy.org Wed May 14 22:54:07 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 May 2014 22:54:07 +0200 (CEST) Subject: [pypy-commit] pypy default: move some utilities to rpython.annotator.bookkeeper Message-ID: <20140514205407.ACC311C02D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71523:7a2c9e5f2782 Date: 2014-05-14 21:52 +0100 http://bitbucket.org/pypy/pypy/changeset/7a2c9e5f2782/ Log: move some utilities to rpython.annotator.bookkeeper diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -14,16 +14,12 @@ SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -27,6 +27,14 @@ from rpython.rtyper import extregistry +BUILTIN_ANALYZERS = {} + +def analyzer_for(func): + def wrapped(ann_func): + BUILTIN_ANALYZERS[func] = ann_func + return func + return wrapped + class Bookkeeper(object): """The log of choices that have been made while analysing the operations. 
It ensures that the same 'choice objects' will be returned if we ask @@ -600,6 +608,7 @@ return False else: return True + # get current bookkeeper def getbookkeeper(): @@ -610,7 +619,8 @@ except AttributeError: return None +def immutablevalue(x): + return getbookkeeper().immutablevalue(x) + def delayed_imports(): - # import ordering hack - global BUILTIN_ANALYZERS - from rpython.annotator.builtin import BUILTIN_ANALYZERS + import rpython.annotator.builtin diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -10,16 +10,13 @@ SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) from rpython.rtyper.llannotation import ( SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import ( + getbookkeeper, immutablevalue, BUILTIN_ANALYZERS, analyzer_for) from rpython.annotator import description from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel -# convenience only! 
-def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - def constpropagate(func, args_s, s_result): """Returns s_result unless all args are constants, in which case the func() is called and a constant result is returned (it must be contained @@ -44,14 +41,6 @@ func, args, realresult, s_result)) return s_realresult -BUILTIN_ANALYZERS = {} - -def analyzer_for(func): - def wrapped(ann_func): - BUILTIN_ANALYZERS[func] = ann_func - return func - return wrapped - # ____________________________________________________________ def builtin_range(*args): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -11,15 +11,11 @@ SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? from rpython.annotator.model import AnnotatorError -# convenience only! 
-def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) From noreply at buildbot.pypy.org Thu May 15 00:35:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 15 May 2014 00:35:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix potential UnicodeEncodeErrors under pytest.py --resultlog: the captured Message-ID: <20140514223507.C657A1D29B2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71524:d67eb7fe33cd Date: 2014-05-14 12:10 -0700 http://bitbucket.org/pypy/pypy/changeset/d67eb7fe33cd/ Log: fix potential UnicodeEncodeErrors under pytest.py --resultlog: the captured output may come in as unicode (grafted from ef6d8454c0a6cd10097c82056727f5cef8b7830e) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -56,6 +56,9 @@ for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) for key, text in sections: + # py.io.StdCaptureFD may send in unicode + if isinstance(text, unicode): + text = text.encode('utf-8') py.builtin.print_(" ", file=self.logfile) py.builtin.print_(" -------------------- %s --------------------" % key.rstrip(), file=self.logfile) From noreply at buildbot.pypy.org Thu May 15 05:02:19 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 May 2014 05:02:19 +0200 (CEST) Subject: [pypy-commit] pypy default: don't import rtyper from the bookkeeper Message-ID: <20140515030219.EDB251D2B97@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71525:d45b68facb24 Date: 2014-05-15 04:01 +0100 http://bitbucket.org/pypy/pypy/changeset/d45b68facb24/ Log: don't import rtyper from the bookkeeper diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -13,8 +13,6 @@ s_None, s_ImpossibleValue, SomeBool, SomeTuple, 
SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType) -from rpython.rtyper.llannotation import ( - SomeAddress, SomePtr, SomeLLADTMeth, lltype_to_annotation) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -23,7 +21,6 @@ from rpython.annotator.argument import ArgumentsForTranslation from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind -from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper import extregistry @@ -145,6 +142,7 @@ check_no_flags(clsdef) def consider_call_site(self, call_op): + from rpython.rtyper.llannotation import SomeLLADTMeth, lltype_to_annotation binding = self.annotator.binding s_callable = binding(call_op.args[0]) args_s = [binding(arg) for arg in call_op.args[1:]] @@ -305,10 +303,6 @@ elif extregistry.is_registered(x): entry = extregistry.lookup(x) result = entry.compute_annotation_bk(self) - elif isinstance(x, lltype._ptr): - result = SomePtr(lltype.typeOf(x)) - elif isinstance(x, llmemory.fakeaddress): - result = SomeAddress() elif tp is type: result = SomeConstantType(x, self) elif callable(x): diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -9,6 +9,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.tool.uid import uid from rpython.rlib.rarithmetic import is_valid_int +from rpython.rtyper.extregistry import ExtRegistryEntry class AddressOffset(Symbolic): @@ -529,6 +530,13 @@ else: return self +class fakeaddressEntry(ExtRegistryEntry): + _type_ = fakeaddress + + def compute_annotation(self): + from rpython.rtyper.llannotation import SomeAddress + return SomeAddress() + # ____________________________________________________________ class 
AddressAsInt(Symbolic): @@ -899,7 +907,6 @@ else: raise TypeError(T) -from rpython.rtyper.extregistry import ExtRegistryEntry class RawMemmoveEntry(ExtRegistryEntry): _about_ = raw_memmove diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -7,6 +7,7 @@ from types import NoneType from rpython.rlib.rarithmetic import maxint, is_valid_int, is_emulated_long import weakref +from rpython.rtyper.extregistry import ExtRegistryEntry class State(object): pass @@ -1407,6 +1408,14 @@ assert not '__dict__' in dir(_ptr) +class _ptrEntry(ExtRegistryEntry): + _type_ = _ptr + + def compute_annotation(self): + from rpython.rtyper.llannotation import SomePtr + return SomePtr(typeOf(self.instance)) + + class _interior_ptr(_abstract_ptr): __slots__ = ('_parent', '_offsets') def _set_parent(self, _parent): From noreply at buildbot.pypy.org Thu May 15 09:31:05 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 May 2014 09:31:05 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: move report to its own directory Message-ID: <20140515073105.B6A731C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5247:d439793c0d81 Date: 2014-05-15 09:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/d439793c0d81/ Log: move report to its own directory diff too long, truncating to 2000 out of 4364 lines diff --git a/talk/dls2014/IEEEbib.bst b/talk/dls2014/report/IEEEbib.bst rename from talk/dls2014/IEEEbib.bst rename to talk/dls2014/report/IEEEbib.bst diff --git a/talk/dls2014/Makefile b/talk/dls2014/report/Makefile rename from talk/dls2014/Makefile rename to talk/dls2014/report/Makefile diff --git a/talk/dls2014/bibl_conf.bib b/talk/dls2014/report/bibl_conf.bib rename from talk/dls2014/bibl_conf.bib rename to talk/dls2014/report/bibl_conf.bib diff --git a/talk/dls2014/mmap pages.pdf b/talk/dls2014/report/mmap pages.pdf rename 
from talk/dls2014/mmap pages.pdf rename to talk/dls2014/report/mmap pages.pdf diff --git a/talk/dls2014/page remapping.pdf b/talk/dls2014/report/page remapping.pdf rename from talk/dls2014/page remapping.pdf rename to talk/dls2014/report/page remapping.pdf diff --git a/talk/dls2014/report/report.lyx b/talk/dls2014/report/report.lyx new file mode 100644 --- /dev/null +++ b/talk/dls2014/report/report.lyx @@ -0,0 +1,4198 @@ +#LyX 2.1 created this file. For more info see http://www.lyx.org/ +\lyxformat 474 +\begin_document +\begin_header +\textclass article +\begin_preamble +% IEEE standard conference template; to be used with: +% spconf.sty - LaTeX style file, and +% IEEEbib.bst - IEEE bibliography style file. +% -------------------------------------------------------------------------- + +\usepackage{spconf} +\usepackage{multicol} + +% bold paragraph titles +\newcommand{\mypar}[1]{{\bf #1.}} + +% Title. +% ------ +\title{C7: Fast software transactional memory for dynamic languages} +% +% Single address. +% --------------- +%\name{Markus P\"uschel\thanks{The author thanks Jelena Kovacevic. This paper +%is a modified version of the template she used in her class.}} +%\address{Department of Computer Science\\ ETH Z\"urich\\Z\"urich, Switzerland} + +% For example: +% ------------ +%\address{School\\ +% Department\\ +% Address} +% +% Two addresses (uncomment and modify for two-address case). 
+% ---------------------------------------------------------- +\twoauthors + {Remi Meier} + {Department of Computer Science\\ + ETH Zürich\\ + Switzerland} + {Armin Rigo} + {...} + +% nice listings +\usepackage{xcolor} +\usepackage{newverbs} + +\usepackage{color} +\definecolor{verylightgray}{rgb}{0.93,0.93,0.93} +\definecolor{darkblue}{rgb}{0.2,0.2,0.6} +\definecolor{commentgreen}{rgb}{0.25,0.5,0.37} +\usepackage{letltxmacro} + +\usepackage{listings} +\makeatletter +\LetLtxMacro{\oldlstinline}{\lstinline} + +\renewcommand\lstinline[1][]{% +\Collectverb{\@@myverb}% +} + +\def\@@myverb#1{% + \begingroup + \fboxsep=0.2em + \colorbox{verylightgray}{\oldlstinline|#1|}% + \endgroup +} +\makeatother +\end_preamble +\use_default_options false +\begin_modules +enumitem +theorems-std +fixltx2e +\end_modules +\maintain_unincluded_children false +\begin_local_layout +#\DeclareLyXModule{gmlists} +#DescriptionBegin +#Adds quoted-list and condensed list environments. +#DescriptionEnd +#Requires: enumitem +#Author: Günter Milde +Format 49 +# Input enumitem.module +# Style Variants +# ============== +Style Enumerate-Alpha +CopyStyle Enumerate +LatexParam "[label=\emph{\alph*}),ref=\emph{\alph*},fullwidth,itemsep=1ex]" +Margin First_Dynamic +LeftMargin x +LabelType Static +LabelCounter enumi +LabelString "\alph{enumi})" +LabelFont +Series Medium +Shape Italic +EndFont +End +# Description with italic label was a failed experiment: +Style Description-Italic +ObsoletedBy Description +End +# Indented compact LyX-List environment +Style Quoted-Labeling +CopyStyle Labeling +LatexName qlyxlist +ItemSep 0 +ParSep 0 +LabelIndent MMM +Preamble +% labeling-like list based on enumitem's description list with +% mandatory second argument (label-pattern) and indent of 2em: +\newenvironment{qlyxlist}[2][]% +{\settowidth{\lyxlabelwidth}{#2} +\addtolength{\lyxlabelwidth}{1.5em} +\description[font=,style=sameline, +leftmargin=\lyxlabelwidth, +noitemsep, labelindent=1.5em, +#1]} +{\enddescription} 
+EndPreamble +End +Style Quoted-List +ObsoletedBy Quoted-Labeling +End +# Dense (condensed) list environments +# =================================== +Style Itemize-Compact +CopyStyle Itemize +LatexParam [noitemsep] +ParSep 0 +TopSep 0.4 +BottomSep 0.4 +End +Style Enumerate-Compact +CopyStyle Enumerate +LatexParam [noitemsep] +ParSep 0 +TopSep 0.4 +BottomSep 0.4 +End +Style Description-Compact +CopyStyle Description +LatexParam [noitemsep] +ParSep 0 +TopSep 0.4 +BottomSep 0.4 +End +Style Compact-Itemize +ObsoletedBy Itemize-Compact +End +Style Itemize-Dense +ObsoletedBy Itemize-Compact +End +Style Compact-Enumerate +ObsoletedBy Enumerate-Compact +End +Style Enumerate-Dense +ObsoletedBy Enumerate-Compact +End +Style Compact-Description +ObsoletedBy Description-Compact +End +Style Description-Dense +ObsoletedBy Description-Compact +End +\end_local_layout +\language english +\language_package none +\inputencoding utf8 +\fontencoding global +\font_roman default +\font_sans default +\font_typewriter default +\font_math auto +\font_default_family default +\use_non_tex_fonts false +\font_sc false +\font_osf false +\font_sf_scale 100 +\font_tt_scale 100 +\graphics default +\default_output_format default +\output_sync 1 +\bibtex_command default +\index_command default +\float_placement h +\paperfontsize default +\spacing single +\use_hyperref true +\pdf_bookmarks true +\pdf_bookmarksnumbered false +\pdf_bookmarksopen false +\pdf_bookmarksopenlevel 1 +\pdf_breaklinks false +\pdf_pdfborder false +\pdf_colorlinks false +\pdf_backref false +\pdf_pdfusetitle true +\papersize default +\use_geometry false +\use_package amsmath 2 +\use_package amssymb 2 +\use_package cancel 0 +\use_package esint 1 +\use_package mathdots 0 +\use_package mathtools 0 +\use_package mhchem 0 +\use_package stackrel 0 +\use_package stmaryrd 0 +\use_package undertilde 0 +\cite_engine basic +\cite_engine_type default +\biblio_style plain +\use_bibtopic false +\use_indices false +\paperorientation portrait 
+\suppress_date false +\justification true +\use_refstyle 0 +\index Index +\shortcut idx +\color #008000 +\end_index +\secnumdepth 3 +\tocdepth 3 +\paragraph_separation indent +\paragraph_indentation default +\quotes_language english +\papercolumns 1 +\papersides 1 +\paperpagestyle default +\listings_params "backgroundcolor={\color{verylightgray}},basicstyle={\scriptsize\ttfamily},commentstyle={\ttfamily\color{commentgreen}},keywordstyle={\bfseries\color{darkblue}},morecomment={[l]{//}},morekeywords={foreach,in,def,type,dynamic,Int,Boolean,infer,void,super,if,boolean,int,else,while,do,extends,class,assert,for,switch,case,private,protected,public,const,final,static,interface,new,true,false,null,return}" +\tracking_changes false +\output_changes false +\html_math_output 0 +\html_css_as_file 0 +\html_be_strict false +\end_header + +\begin_body + +\begin_layout Standard +\begin_inset ERT +status open + +\begin_layout Plain Layout + +% +\backslash +ninept +\end_layout + +\begin_layout Plain Layout + +\end_layout + +\end_inset + + +\begin_inset ERT +status open + +\begin_layout Plain Layout + + +\backslash +maketitle +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Abstract +... + +\end_layout + +\begin_layout Section +Introduction +\end_layout + +\begin_layout Standard +Dynamic languages like Python, PHP, Ruby, and JavaScript +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + + are usually regarded as very expressive but also very slow. + In recent years, the introduction of just-in-time compilers (JIT) for these + languages (e.g. + PyPy, V8, Tracemonkey +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + +) started to change this perception by delivering good performance that + enables new applications. + However, a parallel programming model was not part of the design of those + languages. + Thus, the reference implementations of e.g. 
+ Python and Ruby use a single, global interpreter lock (GIL) to serialize + the execution of code in threads. +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +While this GIL prevents any parallelism from occurring, it also provides + some useful guarantees. + Since this lock is always acquired while executing bytecode instructions + and it may only be released in-between such instructions, it provides perfect + isolation and atomicity between multiple threads for a series of instructions. + Another technology that can provide the same guarantees is transactional + memory (TM). +\end_layout + +\begin_layout Standard +There have been several attempts at replacing the GIL with TM +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + +. + Using transactions to enclose multiple bytecode instructions, we can get + the very same semantics as the GIL while possibly executing several transaction +s in parallel. + Furthermore, by exposing these interpreter-level transactions to the applicatio +n in the form of +\emph on +atomic blocks +\emph default + +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + +, we give dynamic languages a new synchronization mechanism that avoids + several of the problems of locks +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + + as they are used now. +\end_layout + +\begin_layout Standard +\begin_inset Note Note +status open + +\begin_layout Plain Layout +introduction needs work +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +Our contributions include: +\end_layout + +\begin_layout Itemize-Compact +We introduce a new software transactional memory (STM) system that performs + well even on low numbers of CPUs. 
+ It uses a novel combination of hardware features and garbage collector
+ (GC) integration in order to keep the overhead of STM very low.
+\end_layout
+
+\begin_layout Itemize-Compact
+This new STM system is used to replace the GIL in Python and is then evaluated
+ extensively.
+\end_layout
+
+\begin_layout Itemize-Compact
+We introduce atomic blocks to the Python language to provide a backwards
+ compatible, composable synchronization mechanism for threads.
+\end_layout
+
+\begin_layout Standard
+\begin_inset Note Note
+status collapsed
+
+\begin_layout Plain Layout
+Do not start the introduction with the abstract or a slightly modified version.
+ What follows is a possible structure of the introduction.
+ Note that the structure can be modified, but the content should be the
+ same.
+ Introduction and abstract should fill at most the first page, better less.
+\end_layout
+
+\begin_layout Plain Layout
+\begin_inset ERT
+status collapsed
+
+\begin_layout Plain Layout
+
+
+\backslash
+mypar{
+\end_layout
+
+\end_inset
+
+Motivation
+\begin_inset ERT
+status collapsed
+
+\begin_layout Plain Layout
+
+}
+\end_layout
+
+\end_inset
+
+ The first task is to motivate what you do.
+ You can start general and zoom in on the specific problem you consider.
+ In the process you should have explained to the reader: what you are doing,
+ why you are doing it, why it is important (order is usually reversed).
+\end_layout
+
+\begin_layout Plain Layout
+For example, if my result is the fastest sorting implementation ever, one
+ could roughly go as follows.
+ First explain why sorting is important (used everywhere with a few examples)
+ and why performance matters (large datasets, realtime).
+ Then explain that fast implementations are very hard and expensive to get
+ (memory hierarchy, vector, parallel).
+\end_layout
+
+\begin_layout Plain Layout
+Now you state what you do in this paper.
+ In our example: presenting a sorting implementation that is faster for
+ some sizes than all the other ones.
+\end_layout
+
+\begin_layout Plain Layout
+\begin_inset ERT
+status collapsed
+
+\begin_layout Plain Layout
+
+
+\backslash
+mypar{
+\end_layout
+
+\end_inset
+
+Related work
+\begin_inset ERT
+status collapsed
+
+\begin_layout Plain Layout
+
+}
+\end_layout
+
+\end_inset
+
+ Next, you have to give a brief overview of related work.
+ For a report like this, anywhere between 2 and 8 references.
+ Briefly explain what they do.
+ In the end, contrast it with what you do to make precisely clear what your
+ contribution is.
+\end_layout
+
+\end_inset
+
+
+\end_layout
+
+\begin_layout Section
+Background
+\end_layout
+
+\begin_layout Subsection
+Transactional Memory
+\end_layout
+
+\begin_layout Standard
+Transactional memory (TM) is a concurrency control mechanism that comes
+ from database systems.
+ Using transactions, we can group a series of instructions performing operations
+ on memory and make them happen atomically and in complete isolation from
+ other transactions.
+ 
+\emph on
+Atomicity
+\emph default
+ means that all these instructions in the transaction and their effects
+ seem to happen at one, indivisible point in time.
+ Other transactions never see the inconsistent state of a partially executed
+ transaction, which is called 
+\emph on
+isolation
+\emph default
+.
+\end_layout
+
+\begin_layout Standard
+If we start multiple such transactions in multiple threads, the TM system
+ guarantees that the outcome of running the transactions is 
+\emph on
+serializable
+\emph default
+.
+ Meaning, the outcome is equal to some sequential execution of these transaction
+s.
+ Overall, this is exactly what a single global lock guarantees while still
+ allowing the TM system to run transactions in parallel as an optimization.
+\end_layout + +\begin_layout Subsection +Python +\end_layout + +\begin_layout Standard +We implement and evaluate our system for the Python language. + For the actual implementation, we chose the PyPy +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + + interpreter because replacing the GIL there with a TM system is just a + matter of adding a new transformation to the translation process of the + interpreter. +\end_layout + +\begin_layout Standard +Over the years, Python added multiple ways to provide concurrency and parallelis +m to its applications. + We want to highlight two of them, namely +\emph on +threading +\emph default +and +\emph on +multiprocessing +\emph default +. + +\end_layout + +\begin_layout Standard + +\emph on +Threading +\emph default + employs operating system (OS) threads to provide concurrency. + It is, however, limited by the GIL and thus does not provide parallelism. + At this point we should mention that it is indeed possible to run external + functions written in C instead of Python in parallel. + Our work focuses on Python itself and ignores this aspect as it requires + writing in a different language. +\end_layout + +\begin_layout Standard +The second approach, +\emph on +multiprocessing +\emph default +, uses multiple instances of the interpreter itself and runs them in separate + OS processes. + Here we actually get parallelism because we have one GIL per interpreter, + but of course we have the overhead of multiple processes / interpreters + and also need to exchange data between them explicitly and expensively. +\end_layout + +\begin_layout Standard +We focus on the +\emph on +threading +\emph default +approach. + This requires us to remove the GIL from our interpreter in order to run + code in parallel on multiple threads. + One approach to this is fine-grained locking instead of a single global + lock. 
+ Jython +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +references +\end_layout + +\end_inset + + and IronPython are implementations of this. + It requires great care in order to avoid deadlocks, which is why we follow + the TM approach that provides a +\emph on +direct +\emph default +replacement for the GIL. + It does not require careful placing of locks in the right spots. + We will compare our work with Jython for evaluation. +\end_layout + +\begin_layout Standard +\begin_inset Note Note +status open + +\begin_layout Plain Layout +python, ways to parallelize (multi-process vs. + threading, fine-grained locking jython) +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsection +Synchronization +\end_layout + +\begin_layout Standard +It is well known that using locks to synchronize multiple threads is hard. + They are non-composable, have overhead, may deadlock, limit scalability, + and overall add a lot of complexity. + For a better parallel programming model for dynamic languages, we want + to add another, well-known synchronization mechanism: +\emph on +atomic blocks +\emph default +. + +\end_layout + +\begin_layout Standard +Atomic blocks are composable, deadlock-free, higher-level and expose useful + atomicity and isolation guarantees to the application for a series of instructi +ons. + An implementation using a GIL would simply guarantee that the GIL is not + released during the execution of the atomic block. + Using TM, we have the same effect by guaranteeing that all instructions + in an atomic block are executed inside a single transaction. 
+\end_layout + +\begin_layout Standard +\begin_inset Note Note +status open + +\begin_layout Plain Layout +atomic blocks, AME +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +STM, how atomicity & isolation +\end_layout + +\begin_layout Standard +reasons for overhead +\end_layout + +\begin_layout Standard +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +Give a short, self-contained summary of necessary background information. + For example, assume you present an implementation of sorting algorithms. + You could organize into sorting definition, algorithms considered, and + asymptotic runtime statements. + The goal of the background section is to make the paper self-contained + for an audience as large as possible. + As in every section you start with a very brief overview of the section. + Here it could be as follows: In this section we formally define the sorting + problem we consider and introduce the algorithms we use including a cost + analysis. +\end_layout + +\begin_layout Plain Layout +\begin_inset ERT +status collapsed + +\begin_layout Plain Layout + + +\backslash +mypar{ +\end_layout + +\end_inset + +Sorting +\begin_inset ERT +status collapsed + +\begin_layout Plain Layout + +} +\end_layout + +\end_inset + + Precisely define sorting problem you consider. +\end_layout + +\begin_layout Plain Layout +\begin_inset ERT +status collapsed + +\begin_layout Plain Layout + + +\backslash +mypar{ +\end_layout + +\end_inset + +Sorting algorithms +\begin_inset ERT +status collapsed + +\begin_layout Plain Layout + +} +\end_layout + +\end_inset + + Explain the algorithm you use including their costs. +\end_layout + +\begin_layout Plain Layout +As an aside, don't talk about +\begin_inset Quotes eld +\end_inset + +the complexity of the algorithm. +\begin_inset Quotes erd +\end_inset + + It's incorrect, problems have a complexity, not algorithms. 
+\end_layout + +\end_inset + + +\end_layout + +\begin_layout Section +Method +\end_layout + +\begin_layout Subsection +Transactional Memory Model +\end_layout + +\begin_layout Standard +In this section, we describe the general model of our TM system. + This should clarify the general semantics using commonly used terms from + the literature. +\end_layout + +\begin_layout Subsubsection +Conflict Handling +\end_layout + +\begin_layout Standard +Our conflict detection works with +\emph on +object granularity +\emph default +. + Conceptually, it is based on +\emph on +read +\emph default +and +\emph on +write sets +\emph default +of transactions. + Two transactions conflict if they have accessed a common object that is + now in the write set of at least one of them. +\end_layout + +\begin_layout Standard +The +\emph on +concurrency control +\emph default +works partly +\emph on +optimistically +\emph default + for reading of objects, where conflicts caused by just reading an object + in transactions are detected only when the transaction that writes the + object actually commits. + For write-write conflicts we are currently +\emph on +pessimistic +\emph default +: Only one transaction may have a certain object in its write set at any + point in time, others trying to write to it will have to wait or abort. +\end_layout + +\begin_layout Standard +We use +\emph on +lazy version management +\emph default +to ensure that modifications by a transaction are not visible to another + transaction before the former commits. 
+\end_layout + +\begin_layout Standard +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +Conflicts detected with +\emph on +object granularity +\emph default +conceptually using read and write sets +\end_layout + +\begin_layout Itemize +Concurrency control is: +\end_layout + +\begin_deeper +\begin_layout Itemize +optimistic for reading: a transaction checks on commit all other transactions + for read-write conflicts and aborts them or itself if necessary +\end_layout + +\begin_layout Itemize +pessimistic for write-write conflicts: in the write barrier, we check if + some other transaction already modifies this object. + If this is the case, we or the other one will abort (this may change in + the future). +\end_layout + +\end_deeper +\begin_layout Itemize + +\emph on +Lazy version management: +\emph default +it is always ensured that modifications by a transaction are not visible + to other transactions before the former commits. +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Semantics +\end_layout + +\begin_layout Standard +As required for TM systems, we guarantee complete +\emph on +isolation +\emph default +and +\emph on +atomicity +\emph default +for transactions at all times. + Furthermore, the isolation provides full +\emph on +opacity +\emph default +to always guarantee a consistent read set. +\end_layout + +\begin_layout Standard +We support the notion of +\emph on +inevitable transactions +\emph default +that are always guaranteed to commit. + There is always at most one such transaction running in the system. + We use this kind of transaction to provide +\emph on +strong isolation +\emph default + by running non-transactional code in the context of inevitable transactions + and to still provide the +\emph on +serializability +\emph default + of all transaction schedules. 
+\end_layout + +\begin_layout Standard +\begin_inset Note Note +status collapsed + +\begin_layout Itemize + +\emph on +Atomicity +\emph default +and +\emph on +isolation +\emph default +are guaranteed at all times +\end_layout + +\begin_layout Itemize + +\emph on +Opacity +\emph default +guarantee an always-consistent read set +\end_layout + +\begin_layout Itemize + +\emph on +Strong isolation +\emph default +is provided by using inevitable transactions +\end_layout + +\begin_layout Itemize + +\emph on +Inevitable transactions +\emph default +are transactions that are guaranteed to commit. + Turning inevitable when doing I/O is one way to ensure strong isolation +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Contention Management +\end_layout + +\begin_layout Standard +When a conflict is detected, we perform some simple contention management. + First, inevitable transactions always win. + Second, the older transaction wins. + Different schemes are possible. +\end_layout + +\begin_layout Subsubsection +Software Transactional Memory +\end_layout + +\begin_layout Standard +Generally speaking, the system is fully implemented in software. + However, we exploit some more advanced features of current CPUs, especially + +\emph on +memory segmentation, virtual memory, +\emph default +and the 64-bit address space. +\end_layout + +\begin_layout Subsection +Implementation +\end_layout + +\begin_layout Standard +In this section, we will present the general idea of how the TM model is + implemented. + Especially the aspects of providing isolation and atomicity, as well as + conflict detection are explained. + We try to do this without going into too much detail about the implementation. + The later section +\begin_inset CommandInset ref +LatexCommand ref +reference "sub:Low-level-Implementation" + +\end_inset + + will discuss it in more depth. 
+\end_layout + +\begin_layout Subsubsection +Memory Segmentation +\end_layout + +\begin_layout Standard +A naive approach to providing complete isolation between threads is to partition + the virtual memory of a process into +\begin_inset Formula $N$ +\end_inset + + segments, one per thread. + Each segment then holds a copy of all the memory available to the program. + Thus, each thread automatically has a private copy of every object that + it can modify in complete isolation from other threads. +\end_layout + +\begin_layout Standard +To get references to objects that are valid in all threads, we will use + the object's offset inside the segment. + Since all segments are copies of each other, the +\emph on +Segment Offset (SO) +\emph default + will point to the private version of an object in all threads/segments. + To then translate this SO to a real virtual memory address when used inside + a thread, we need to add the thread's segment start address to the SO. + The result of this operation is called a +\emph on +Linear Address (LA) +\emph default +. + This is illustrated in Figure +\begin_inset CommandInset ref +LatexCommand ref +reference "fig:Segment-Addressing" + +\end_inset + +. +\end_layout + +\begin_layout Standard +To make this address translation efficient, we use the segment register + +\begin_inset Formula $\%gs$ +\end_inset + +. + When this register points to a thread's segment start address, we can instruct + the CPU to perform the above translation from a reference of the form +\begin_inset Formula $\%gs{::}SO$ +\end_inset + + to the right LA on its own. +\end_layout + +\begin_layout Standard +In summary, we can use a single SO to reference the same object in all threads, + and it will be translated by the CPU to a LA that always points to the + thread's private version of this object. + Thereby, threads are fully isolated from each other. 
+ However, +\begin_inset Formula $N$ +\end_inset + + segments require +\begin_inset Formula $N$ +\end_inset + +-times the memory and modifications on an object need to be propagated to + all segments. +\end_layout + +\begin_layout Standard +\begin_inset Float figure +placement t +wide true +sideways false +status open + +\begin_layout Plain Layout +\align center +\begin_inset Graphics + filename segment addressing.pdf + scale 80 + +\end_inset + + +\end_layout + +\begin_layout Plain Layout +\begin_inset Caption Standard + +\begin_layout Plain Layout +Segment Addressing +\begin_inset CommandInset label +LatexCommand label +name "fig:Segment-Addressing" + +\end_inset + + +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Plain Layout + +\end_layout + +\end_inset + + +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +Partition virtual memory into N segments +\end_layout + +\begin_deeper +\begin_layout Itemize +1 segment per thread +\end_layout + +\begin_layout Itemize +each segment is a copy of the whole memory +\end_layout + +\begin_layout Itemize +objects referenced through +\emph on +segment offset (SO) +\end_layout + +\begin_layout Itemize +in this model: each thread has private copy +\end_layout + +\end_deeper +\begin_layout Itemize +Translate SO to +\emph on +linear address (LA) +\end_layout + +\begin_deeper +\begin_layout Itemize +%gs segment register holds thread's segment start address +\end_layout + +\begin_layout Itemize +CPU translates %gs::SO as +\begin_inset Formula $\%gs+SO$ +\end_inset + + to a LA +\end_layout + +\begin_layout Itemize +LA different when translated in different threads --> always points to private + copy +\end_layout + +\end_deeper +\begin_layout Itemize +N-copies are inefficient +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Page Sharing +\end_layout + +\begin_layout Standard +In order to eliminate the prohibitive memory requirements of keeping around + +\begin_inset Formula $N$ 
+\end_inset + + segment copies, we share memory between them. + The segments are initially allocated in a single range of virtual memory + by a call to +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +mmap() +\end_layout + +\end_inset + +. + As illustrated in Figure +\begin_inset CommandInset ref +LatexCommand ref +reference "fig:mmap()-Page-Mapping" + +\end_inset + +, +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +mmap() +\end_layout + +\end_inset + + creates a mapping between a range of virtual memory pages and virtual file + pages. + The virtual file pages are then mapped lazily by the kernel to real physical + memory pages. + The mapping generated by +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +mmap() +\end_layout + +\end_inset + + is initially linear but can be changed arbitrarily. + Especially, we can remap so that multiple virtual memory pages map to a + single virtual file page. + This is what we use to share memory between the segments since then we + also only require one page of physical memory. +\end_layout + +\begin_layout Standard +\begin_inset Float figure +wide false +sideways false +status open + +\begin_layout Plain Layout +\align center +\begin_inset Graphics + filename mmap pages.pdf + scale 80 + +\end_inset + + +\end_layout + +\begin_layout Plain Layout +\begin_inset Caption Standard + +\begin_layout Plain Layout + +\family typewriter +mmap() +\family default + Page Mapping +\begin_inset CommandInset label +LatexCommand label +name "fig:mmap()-Page-Mapping" + +\end_inset + + +\end_layout + +\end_inset + + +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +As illustrated in Figure +\begin_inset CommandInset ref +LatexCommand ref +reference "fig:Page-Remapping" + +\end_inset + +, in our initial configuration (I) all segments are backed by their own + range of virtual file pages. + This is the share-nothing configuration. 
+\end_layout + +\begin_layout Standard +We then designate segment 0 to be the +\emph on +Sharing-Segment +\emph default +. + No thread gets this segment assigned to it, it simply holds the pages shared + between all threads. + So in (II), we remap all virtual pages of the segments +\begin_inset Formula $>0$ +\end_inset + + to the file pages of our sharing-segment. + This is the fully-shared configuration. +\end_layout + +\begin_layout Standard +During runtime, we can then privatize single pages in segments +\begin_inset Formula $>0$ +\end_inset + + again by remapping single pages as seen in (III). +\end_layout + +\begin_layout Standard +Looking back at address translation for object references, we see now that + this is actually a two-step process. + First, +\begin_inset Formula $\%gs{::}SO$ +\end_inset + + gets translated to different linear addresses in different threads by the + CPU. + Then, depending on the current mapping of virtual pages to file pages, + these LAs can map to a single file page in the sharing-segment, or to privatize +d file pages in the corresponding segments. + This mapping is also performed efficiently by the CPU and can easily be + done on every access to an object. +\end_layout + +\begin_layout Standard +In summary, +\begin_inset Formula $\%gs{::}SO$ +\end_inset + + is translated efficiently by the CPU to either a physical memory location + which is shared between several threads/segments, or to a location in memory + private to the segment/thread. + This makes the memory segmentation model for isolation memory efficient + again. 
+\end_layout + +\begin_layout Standard +\begin_inset Float figure +wide false +sideways false +status open + +\begin_layout Plain Layout +\align center +\begin_inset Graphics + filename page remapping.pdf + width 100col% + +\end_inset + + +\end_layout + +\begin_layout Plain Layout +\begin_inset Caption Standard + +\begin_layout Plain Layout +Page Remapping: (I) after +\family typewriter +mmap() +\family default +. + (II) remap all pages to segment 0, fully shared memory configuration. + (III) privatize single pages. +\begin_inset CommandInset label +LatexCommand label +name "fig:Page-Remapping" + +\end_inset + + +\end_layout + +\end_inset + + +\end_layout + +\end_inset + + +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +share pages between segments +\end_layout + +\begin_deeper +\begin_layout Itemize +mmap()ed region can be remapped by remap_file_pages() +\end_layout + +\begin_layout Itemize +designate a segment (segment 0) to hold SHARED pages +\end_layout + +\begin_layout Itemize +unsharing a page: remap & copy --> private page in segment >= 1 +\end_layout + +\end_deeper +\begin_layout Itemize +2-step address translation +\end_layout + +\begin_deeper +\begin_layout Itemize +first memory segmentation: %gs::SO --> LA +\end_layout + +\begin_layout Itemize +second MMU virtual address mapping: LA --> shared or private physical page +\end_layout + +\begin_layout Itemize +done on every access to objects! +\end_layout + +\end_deeper +\begin_layout Itemize +SO can translate to shared or private version of an object (completely in + hardware) +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Isolation: Copy-On-Write +\end_layout + +\begin_layout Standard +We now use these mechanisms to provide isolation for transactions. + Using write barriers, we implement a +\emph on +Copy-On-Write (COW) +\emph default +on the level of pages. 
+ Starting from the initial fully-shared configuration (Figure +\begin_inset CommandInset ref +LatexCommand ref +reference "fig:Page-Remapping" + +\end_inset + +, (II)), when we need to modify an object without other threads seeing the + changes immediately, we ensure that all pages belonging to the object are + private to our segment. +\end_layout + +\begin_layout Standard +To detect when to privatize pages, we use write barriers before every write. + When the barrier detects that the object is not in a private page (or any + pages that belong to the object), we remap and copy the pages to the thread's + segment. + From now on, the translation of +\begin_inset Formula $\%gs{::}SO$ +\end_inset + + in this particular segment will resolve to the private version of the object. + Note, the SO used to reference the object does not change during that process. +\end_layout + +\begin_layout Standard +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +if object in SHARED page, privatize & copy the page +\end_layout + +\begin_layout Itemize +no change of SO! +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Isolation: Barriers +\end_layout + +\begin_layout Standard +The job of barriers is to ensure complete isolation between transactions + and to register the objects in the read or write set. + We insert read and write barriers before reading or modifying an object + except if we statically know an object to be readable or writable already. + +\end_layout + +\begin_layout Description +Read +\begin_inset space ~ +\end_inset + +Barrier: Adds the object to the read set of the current transaction. + Since our two-step address translation automatically resolves the reference + to the private version of the object on every access anyway, this is not + the job of the read barrier anymore. 
+\end_layout + +\begin_layout Description +Write +\begin_inset space ~ +\end_inset + +Barrier: Adds the object to the read and write set of the current transaction + and checks if all pages of the object are private, doing COW otherwise. +\begin_inset Newline newline +\end_inset + +Furthermore, we currently allow only one transaction modifying an object + at a time. + To ensure this, we acquire a write lock on the object and also eagerly + check for a write-write conflict at this point. + If there is a conflict, we do some contention management to decide which + transaction has to wait or abort. + Eagerly detecting this kind of conflict is not inherent to our system, + future experiments may show that we want to lift this restriction. +\end_layout + +\begin_layout Standard +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +read barrier +\end_layout + +\begin_deeper +\begin_layout Itemize +SO automatically translated to right (PRIVATE or SHARED) version of the + object +\end_layout + +\begin_layout Itemize +add object to read set +\end_layout + +\end_deeper +\begin_layout Itemize +write barrier +\end_layout + +\begin_deeper +\begin_layout Itemize +if object is in SHARED page, COW +\end_layout + +\begin_layout Itemize +acquire write lock (w-w conflict detection and contention management) +\end_layout + +\begin_layout Itemize +add to read & write set +\end_layout + +\end_deeper +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Atomicity: Commit & Abort +\end_layout + +\begin_layout Standard +To provide atomicity for a transaction, we want to make changes visible + on commit. + We also need to be able to completely abort a transaction without a trace, + like it never happened. +\end_layout + +\begin_layout Description +Commit: If a transaction commits, we synchronize all threads so that all + of them are waiting in a safe point. 
+ In the committing transaction, we go through all objects in the write set + and check if another transaction in a different segment read the same object. + Conflicts are resolved again by either the committing or the other transaction + waiting or aborting. +\begin_inset Newline newline +\end_inset + +We then push all changes of modified objects in private pages to all the + pages in other segments, including the sharing-segment (segment 0). +\end_layout + +\begin_layout Description +Abort: On abort the transaction will forget about all the changes it has + done. + All objects in the write set are reset by copying their previous version + from the sharing-segment into the private pages of the aborting transaction. +\end_layout + +\begin_layout Description +\begin_inset Note Note +status collapsed + +\begin_layout Itemize +Commit +\end_layout + +\begin_deeper +\begin_layout Itemize +go through write set: check r-w conflicts in other segments (contention + management) +\end_layout + +\begin_layout Itemize +modifications in private pages: copy to other segments (object-level, not + page-level) +\end_layout + +\end_deeper +\begin_layout Itemize +Abort +\end_layout + +\begin_deeper +\begin_layout Itemize +look in SHARED segment 0 and reset all modifications in private pages +\end_layout + +\end_deeper +\end_inset + + +\end_layout + +\begin_layout Subsubsection +Summary +\end_layout + +\begin_layout Standard +We provide isolation between transactions by privatizing the pages of the + segments belonging to the threads the transactions run in. + To detect when and which pages need privatization, we use write barriers + that trigger a COW of one or several pages. + Conflicts, however, are detected on the level of objects; based on the + concept of read and write sets. + Barriers before reading and writing add objects to the corresponding set; + particularly detecting write-write conflicts eagerly. 
+ On commit, we resolve read-write conflicts and push modifications to other + segments. + Aborting transactions simply undo their changes by copying from the sharing-seg +ment. +\end_layout + +\begin_layout Subsection +Low-level Implementation +\begin_inset CommandInset label +LatexCommand label +name "sub:Low-level-Implementation" + +\end_inset + + +\end_layout + +\begin_layout Standard +In this section, we will provide details about the actual implementation + of the system and discuss some of the issues that we encountered. +\end_layout + +\begin_layout Subsubsection +Architecture +\end_layout + +\begin_layout Standard +Our TM system is designed as a library that covers all aspects around transactio +ns and object management. + The library consists of two parts: (I) It provides a simple interface to + starting and committing transactions, as well as the required read and + write barriers. + (II) It also includes a +\emph on +garbage collector (GC) +\emph default +that is closely integrated with the TM part (e.g. + it shares the write barrier). + The close integration helps in order to know more about the lifetime of + an object, as will be explained in the following sections. 
+\end_layout + +\begin_layout Subsubsection +Application Programming Interface +\begin_inset CommandInset label +LatexCommand label +name "sub:Application-Programming-Interfac" + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset listings +lstparams "basicstyle={\footnotesize\ttfamily},tabsize=4" +inline false +status open + +\begin_layout Plain Layout + +void stm_start_transaction(tl, jmpbuf) +\end_layout + +\begin_layout Plain Layout + +void stm_commit_transaction() +\end_layout + +\begin_layout Plain Layout + +void stm_read(object_t *obj) +\end_layout + +\begin_layout Plain Layout + +void stm_write(object_t *obj) +\end_layout + +\begin_layout Plain Layout + +object_t *stm_allocate(ssize_t size_rounded) +\end_layout + +\begin_layout Plain Layout + +STM_PUSH_ROOT(tl, obj) +\end_layout + +\begin_layout Plain Layout + +STM_POP_ROOT(tl, obj) +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_start_transaction() +\end_layout + +\end_inset + + starts a transaction. + It requires two arguments, the first being a thread-local data structure + and the second a buffer for use by +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +setjmp() +\end_layout + +\end_inset + +. + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_commit_transaction() +\end_layout + +\end_inset + + tries to commit the current transaction. 
+ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_read() +\end_layout + +\end_inset + +, +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_write() +\end_layout + +\end_inset + + perform a read or a write barrier on an object and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_allocate() +\end_layout + +\end_inset + + allocates a new object with the specified size (must be a multiple of 16). + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +STM_PUSH_ROOT() +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +STM_POP_ROOT() +\end_layout + +\end_inset + + push and pop objects on the shadow stack +\begin_inset Foot +status open + +\begin_layout Plain Layout +A stack for pointers to GC objects that allows for precise garbage collection. + All objects on that stack are never seen as garbage and are thus always + kept alive. +\end_layout + +\end_inset + +. + Objects have to be saved using this stack around calls that may cause a + GC cycle to happen, and also while there is no transaction running. + In this simplified API, only +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_allocate() +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +stm_commit_transaction() +\end_layout + +\end_inset + + require saving object references. 
+\end_layout + +\begin_layout Standard +The type +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +object_t +\end_layout + +\end_inset + + is special as it causes the compiler +\begin_inset Foot +status collapsed + +\begin_layout Plain Layout +Clang 3.5 with some patches to this address-space 256 feature +\end_layout + +\end_inset + + to make all accesses through it relative to the +\begin_inset Formula $\%gs$ +\end_inset + + register. + With exceptions, nearly all accesses to objects managed by the TM system + should use this type so that the CPU will translate the reference to the + right version of the object. +\end_layout + +\begin_layout Subsubsection +Setup +\begin_inset CommandInset label +LatexCommand label From noreply at buildbot.pypy.org Thu May 15 09:31:06 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 May 2014 09:31:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add paper template Message-ID: <20140515073106.CC9E91C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5248:67ffec8bdea3 Date: 2014-05-15 09:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/67ffec8bdea3/ Log: add paper template diff --git a/talk/dls2014/paper/Makefile b/talk/dls2014/paper/Makefile new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/Makefile @@ -0,0 +1,11 @@ +PROJECT=paper +TEX=pdflatex +BUILDTEX=$(TEX) $(PROJECT).tex + +all: + $(BUILDTEX) + $(BUILDTEX) + + +clean: + rm -f *.log *.bak *.aux *.bbl *.blg *.idx *.toc *.out *~ diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/paper.tex @@ -0,0 +1,206 @@ + +\documentclass{sigplanconf} + +% The following \documentclass options may be useful: + +% preprint Remove this option only once the paper is in final form. +% 10pt To set in 10-point type instead of 9-point. +% 11pt To set in 11-point type instead of 9-point. 
+% authoryear To obtain author/year citation style instead of numeric. + +\usepackage[utf8]{inputenc} +\usepackage{array} +\usepackage{color} +\usepackage{hyperref} +\usepackage{amsmath} +\usepackage{amssymb} + +\newcommand{\mynote}[2]{% + \textcolor{red}{% + \fbox{\bfseries\sffamily\scriptsize#1}% + {\small$\blacktriangleright$\textsf{\emph{#2}}$\blacktriangleleft$}% + }% +} + +\newcommand\cfbolz[1]{\mynote{Carl Friedrich}{#1}} + +\begin{document} + +\special{papersize=8.5in,11in} +\setlength{\pdfpageheight}{\paperheight} +\setlength{\pdfpagewidth}{\paperwidth} + +\conferenceinfo{ICOOOLPS workshop 2014}{July 28th, 2014, Uppsala, Sweden} +\copyrightyear{2014} +%\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} +\doi{nnnnnnn.nnnnnnn} + +% Uncomment one of the following two, if you are not going for the +% traditional copyright transfer agreement. + +%\exclusivelicense % ACM gets exclusive license to publish, + % you retain copyright + +%\permissiontopublish % ACM gets nonexclusive license to publish + % (paid open-access papers, + % short abstracts) + +%% \titlebanner{banner above paper title} % These are ignored unless +%% \preprintfooter{short description of paper} % 'preprint' option specified. + +\title{A Way Forward in Parallelising Dynamic Languages} +\subtitle{Position Paper, ICOOOLPS'14} + +\authorinfo{Remigius Meier} + {Department of Computer Science\\ ETH Zürich} + {remi.meier at inf.ethz.ch} +\authorinfo{Armin Rigo} + {www.pypy.org} + {arigo at tunes.org} + +\maketitle + +\begin{abstract} +.... +\end{abstract} + +%\category{CR-number}{subcategory}{third-level} + +% general terms are not compulsory anymore, +% you may leave them out +%% \terms +%% term1, term2 + +\keywords +... + +\section{Introduction} +... + + +\section{Conclusion} + + +%% \appendix +%% \section{Appendix Title} + +%% This is the text of the appendix, if you need one. + +\acks +... + +% We recommend abbrvnat bibliography style. 
+ +\bibliographystyle{abbrvnat} + +% The bibliography should be embedded for final submission. + +\begin{thebibliography}{} +\softraggedright + +\bibitem{dan07} + Dan Grossman. 2007. The transactional memory / garbage collection + analogy. \emph{In Proceedings of the 22nd annual ACM SIGPLAN + conference on Object-oriented programming systems and + applications} (OOPSLA '07). + +\bibitem{webjython} + The Jython Project, \url{www.jython.org} + +\bibitem{odaira14} + Odaira, Rei, Jose G. Castanos, and Hisanobu Tomari. "Eliminating + global interpreter locks in Ruby through hardware transactional + memory." \emph{Proceedings of the 19th ACM SIGPLAN symposium on + Principles and practice of parallel programming.} ACM, 2014. + +\bibitem{warmhoff13} + Wamhoff, Jons-Tobias, et al. "FastLane: improving performance of + software transactional memory for low thread counts." + \emph{Proceedings of the 18th ACM SIGPLAN symposium on Principles + and practice of parallel programming.} ACM, 2013. + +\bibitem{drago11} + Dragojević, Aleksandar, et al. "Why STM can be more than a research + toy." \emph{Communications of the ACM} 54.4 (2011): 70-77. + +\bibitem{cascaval08} + Cascaval, Calin, et al. "Software transactional memory: Why is it + only a research toy?." \emph{Queue} 6.5 (2008): 40. + +\bibitem{nicholas06} + Nicholas Riley and Craig Zilles. 2006. Hardware transactional memory + support for lightweight dynamic language evolution. \emph{In + Companion to the 21st ACM SIGPLAN symposium on Object-oriented + programming systems, languages, and applications} (OOPSLA + '06). ACM, New York, NY, USA + +\bibitem{fuad10} + Fuad Tabba. 2010. Adding concurrency in python using a commercial + processor's hardware transactional memory support. \emph{SIGARCH + Comput. Archit. News 38}, 5 (April 2010) + +\bibitem{felber07} + Felber, Pascal, et al. "Transactifying applications using an open + compiler framework." \emph{TRANSACT}, August (2007): 4-6. 
+ +\bibitem{bill06} + Bill McCloskey, Feng Zhou, David Gay, and Eric + Brewer. 2006. Autolocker: synchronization inference for atomic + sections. \emph{In Conference record of the 33rd ACM SIGPLAN-SIGACT + symposium on Principles of programming languages (POPL '06)}. ACM, + New York, NY, USA + +\bibitem{spear09} + Spear, Michael F., et al. "Transactional mutex locks." \emph{SIGPLAN + Workshop on Transactional Computing.} 2009. + +\bibitem{lamport79} + Lamport, Leslie. "How to make a multiprocessor computer that + correctly executes multiprocess programs." \emph{Computers, IEEE + Transactions} on 100.9 (1979): 690-691. + +\bibitem{victor11} + Victor Pankratius and Ali-Reza Adl-Tabatabai. 2011. A study of + transactional memory vs. locks in practice. In \emph{Proceedings of + the twenty-third annual ACM symposium on Parallelism in algorithms + and architectures} (SPAA '11). ACM, New York, NY, USA + +\bibitem{christopher10} + Christopher J. Rossbach, Owen S. Hofmann, and Emmett + Witchel. 2010. Is transactional programming actually + easier?. \emph{SIGPLAN} Not. 45, 5 (January 2010), 47-56. + +\bibitem{tim03} + Tim Harris and Keir Fraser. 2003. Language support for lightweight + transactions. \emph{In Proceedings of the 18th annual ACM SIGPLAN + conference on Object-oriented programing, systems, languages, and + applications} (OOPSLA '03). + +\bibitem{tim05} + Tim Harris, Simon Marlow, Simon Peyton-Jones, and Maurice + Herlihy. 2005. Composable memory transactions. \emph{In Proceedings + of the tenth ACM SIGPLAN symposium on Principles and practice of + parallel programming} (PPoPP '05). + +\bibitem{shan08} + Shan Lu, Soyeon Park, Eunsoo Seo, and Yuanyuan Zhou. 2008. Learning + from mistakes: a comprehensive study on real world concurrency bug + characteristics. \emph{SIGARCH Comput. Archit. News} 36, 1 (March 2008), + 329-339. + +\bibitem{leis14} + Leis, Viktor, Alfons Kemper, and Thomas Neumann. "Exploiting + Hardware Transactional Memory in Main-Memory Databases." 
+ \emph{Proc. of ICDE}. 2014. + +\bibitem{biased} + Kenneth Russell and David Detlefs. 2006. Eliminating + synchronization-related atomic operations with biased locking and + bulk rebiasing. \emph{In Proceedings of the 21st annual ACM SIGPLAN + conference on Object-oriented programing, systems, languages, and + applications} (OOPSLA '06). + +\end{thebibliography} + + +\end{document} diff --git a/talk/dls2014/paper/sigplanconf.cls b/talk/dls2014/paper/sigplanconf.cls new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/sigplanconf.cls @@ -0,0 +1,1311 @@ +%----------------------------------------------------------------------------- +% +% LaTeX Class/Style File +% +% Name: sigplanconf.cls +% +% Purpose: A LaTeX 2e class file for SIGPLAN conference proceedings. +% This class file supercedes acm_proc_article-sp, +% sig-alternate, and sigplan-proc. +% +% Author: Paul C. Anagnostopoulos +% Windfall Software +% 978 371-2316 +% paul [atsign] windfall.com +% +% Created: 12 September 2004 +% +% Revisions: See end of file. +% +% This work is licensed under the Creative Commons Attribution License. +% To view a copy of this license, visit +% http://creativecommons.org/licenses/by/3.0/ +% or send a letter to Creative Commons, 171 2nd Street, Suite 300, +% San Francisco, California, 94105, U.S.A. +% +%----------------------------------------------------------------------------- + + +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{sigplanconf}[2013/07/02 v2.8 ACM SIGPLAN Proceedings] + +% The following few pages contain LaTeX programming extensions adapted +% from the ZzTeX macro package. + +% Token Hackery +% ----- ------- + + +\def \@expandaftertwice {\expandafter\expandafter\expandafter} +\def \@expandafterthrice {\expandafter\expandafter\expandafter\expandafter + \expandafter\expandafter\expandafter} + +% This macro discards the next token. + +\def \@discardtok #1{}% token + +% This macro removes the `pt' following a dimension. 
+ +{\catcode `\p = 12 \catcode `\t = 12 + +\gdef \@remover #1pt{#1} + +} % \catcode + +% This macro extracts the contents of a macro and returns it as plain text. +% Usage: \expandafter\@defof \meaning\macro\@mark + +\def \@defof #1:->#2\@mark{#2} + +% Control Sequence Names +% ------- -------- ----- + + +\def \@name #1{% {\tokens} + \csname \expandafter\@discardtok \string#1\endcsname} + +\def \@withname #1#2{% {\command}{\tokens} + \expandafter#1\csname \expandafter\@discardtok \string#2\endcsname} + +% Flags (Booleans) +% ----- ---------- + +% The boolean literals \@true and \@false are appropriate for use with +% the \if command, which tests the codes of the next two characters. + +\def \@true {TT} +\def \@false {FL} + +\def \@setflag #1=#2{\edef #1{#2}}% \flag = boolean + +% IF and Predicates +% -- --- ---------- + +% A "predicate" is a macro that returns \@true or \@false as its value. +% Such values are suitable for use with the \if conditional. For example: +% +% \if \@oddp{\x} \else \fi + +% A predicate can be used with \@setflag as follows: +% +% \@setflag \flag = {} + +% Here are the predicates for TeX's repertoire of conditional +% commands. These might be more appropriately interspersed with +% other definitions in this module, but what the heck. +% Some additional "obvious" predicates are defined. 
+ +\def \@eqlp #1#2{\ifnum #1 = #2\@true \else \@false \fi} +\def \@neqlp #1#2{\ifnum #1 = #2\@false \else \@true \fi} +\def \@lssp #1#2{\ifnum #1 < #2\@true \else \@false \fi} +\def \@gtrp #1#2{\ifnum #1 > #2\@true \else \@false \fi} +\def \@zerop #1{\ifnum #1 = 0\@true \else \@false \fi} +\def \@onep #1{\ifnum #1 = 1\@true \else \@false \fi} +\def \@posp #1{\ifnum #1 > 0\@true \else \@false \fi} +\def \@negp #1{\ifnum #1 < 0\@true \else \@false \fi} +\def \@oddp #1{\ifodd #1\@true \else \@false \fi} +\def \@evenp #1{\ifodd #1\@false \else \@true \fi} +\def \@rangep #1#2#3{\if \@orp{\@lssp{#1}{#2}}{\@gtrp{#1}{#3}}\@false \else + \@true \fi} +\def \@tensp #1{\@rangep{#1}{10}{19}} + +\def \@dimeqlp #1#2{\ifdim #1 = #2\@true \else \@false \fi} +\def \@dimneqlp #1#2{\ifdim #1 = #2\@false \else \@true \fi} +\def \@dimlssp #1#2{\ifdim #1 < #2\@true \else \@false \fi} +\def \@dimgtrp #1#2{\ifdim #1 > #2\@true \else \@false \fi} +\def \@dimzerop #1{\ifdim #1 = 0pt\@true \else \@false \fi} +\def \@dimposp #1{\ifdim #1 > 0pt\@true \else \@false \fi} +\def \@dimnegp #1{\ifdim #1 < 0pt\@true \else \@false \fi} + +\def \@vmodep {\ifvmode \@true \else \@false \fi} +\def \@hmodep {\ifhmode \@true \else \@false \fi} +\def \@mathmodep {\ifmmode \@true \else \@false \fi} +\def \@textmodep {\ifmmode \@false \else \@true \fi} +\def \@innermodep {\ifinner \@true \else \@false \fi} + +\long\def \@codeeqlp #1#2{\if #1#2\@true \else \@false \fi} + +\long\def \@cateqlp #1#2{\ifcat #1#2\@true \else \@false \fi} + +\long\def \@tokeqlp #1#2{\ifx #1#2\@true \else \@false \fi} +\long\def \@xtokeqlp #1#2{\expandafter\ifx #1#2\@true \else \@false \fi} + +\long\def \@definedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@false \else \@true \fi} + +\long\def \@undefinedp #1{% + \expandafter\ifx \csname \expandafter\@discardtok \string#1\endcsname + \relax \@true \else \@false \fi} + +\def \@emptydefp #1{\ifx #1\@empty \@true \else \@false \fi}% {\name} + 
+\let \@emptylistp = \@emptydefp + +\long\def \@emptyargp #1{% {#n} + \@empargp #1\@empargq\@mark} +\long\def \@empargp #1#2\@mark{% + \ifx #1\@empargq \@true \else \@false \fi} +\def \@empargq {\@empargq} + +\def \@emptytoksp #1{% {\tokenreg} + \expandafter\@emptoksp \the#1\@mark} + +\long\def \@emptoksp #1\@mark{\@emptyargp{#1}} + +\def \@voidboxp #1{\ifvoid #1\@true \else \@false \fi} +\def \@hboxp #1{\ifhbox #1\@true \else \@false \fi} +\def \@vboxp #1{\ifvbox #1\@true \else \@false \fi} + +\def \@eofp #1{\ifeof #1\@true \else \@false \fi} + + +% Flags can also be used as predicates, as in: +% +% \if \flaga \else \fi + + +% Now here we have predicates for the common logical operators. + +\def \@notp #1{\if #1\@false \else \@true \fi} + +\def \@andp #1#2{\if #1% + \if #2\@true \else \@false \fi + \else + \@false + \fi} + +\def \@orp #1#2{\if #1% + \@true + \else + \if #2\@true \else \@false \fi + \fi} + +\def \@xorp #1#2{\if #1% + \if #2\@false \else \@true \fi + \else + \if #2\@true \else \@false \fi + \fi} + +% Arithmetic +% ---------- + +\def \@increment #1{\advance #1 by 1\relax}% {\count} + +\def \@decrement #1{\advance #1 by -1\relax}% {\count} + +% Options +% ------- + + +\@setflag \@authoryear = \@false +\@setflag \@blockstyle = \@false +\@setflag \@copyrightwanted = \@true +\@setflag \@explicitsize = \@false +\@setflag \@mathtime = \@false +\@setflag \@natbib = \@true +\@setflag \@ninepoint = \@true +\newcount{\@numheaddepth} \@numheaddepth = 3 +\@setflag \@onecolumn = \@false +\@setflag \@preprint = \@false +\@setflag \@reprint = \@false +\@setflag \@tenpoint = \@false +\@setflag \@times = \@false + +% Note that all the dangerous article class options are trapped. 
+ +\DeclareOption{9pt}{\@setflag \@ninepoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{10pt}{\PassOptionsToClass{10pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@tenpoint = \@true + \@setflag \@explicitsize = \@true} + +\DeclareOption{11pt}{\PassOptionsToClass{11pt}{article}% + \@setflag \@ninepoint = \@false + \@setflag \@explicitsize = \@true} + +\DeclareOption{12pt}{\@unsupportedoption{12pt}} + +\DeclareOption{a4paper}{\@unsupportedoption{a4paper}} + +\DeclareOption{a5paper}{\@unsupportedoption{a5paper}} + +\DeclareOption{authoryear}{\@setflag \@authoryear = \@true} + +\DeclareOption{b5paper}{\@unsupportedoption{b5paper}} + +\DeclareOption{blockstyle}{\@setflag \@blockstyle = \@true} + +\DeclareOption{cm}{\@setflag \@times = \@false} + +\DeclareOption{computermodern}{\@setflag \@times = \@false} + +\DeclareOption{executivepaper}{\@unsupportedoption{executivepaper}} + +\DeclareOption{indentedstyle}{\@setflag \@blockstyle = \@false} + +\DeclareOption{landscape}{\@unsupportedoption{landscape}} + +\DeclareOption{legalpaper}{\@unsupportedoption{legalpaper}} + +\DeclareOption{letterpaper}{\@unsupportedoption{letterpaper}} + +\DeclareOption{mathtime}{\@setflag \@mathtime = \@true} + +\DeclareOption{natbib}{\@setflag \@natbib = \@true} + +\DeclareOption{nonatbib}{\@setflag \@natbib = \@false} + +\DeclareOption{nocopyrightspace}{\@setflag \@copyrightwanted = \@false} + +\DeclareOption{notitlepage}{\@unsupportedoption{notitlepage}} + +\DeclareOption{numberedpars}{\@numheaddepth = 4} + +\DeclareOption{numbers}{\@setflag \@authoryear = \@false} + +%%%\DeclareOption{onecolumn}{\@setflag \@onecolumn = \@true} + +\DeclareOption{preprint}{\@setflag \@preprint = \@true} + +\DeclareOption{reprint}{\@setflag \@reprint = \@true} + +\DeclareOption{times}{\@setflag \@times = \@true} + +\DeclareOption{titlepage}{\@unsupportedoption{titlepage}} + +\DeclareOption{twocolumn}{\@setflag \@onecolumn = \@false} + 
+\DeclareOption*{\PassOptionsToClass{\CurrentOption}{article}} + +\ExecuteOptions{9pt,indentedstyle,times} +\@setflag \@explicitsize = \@false +\ProcessOptions + +\if \@onecolumn + \if \@notp{\@explicitsize}% + \@setflag \@ninepoint = \@false + \PassOptionsToClass{11pt}{article}% + \fi + \PassOptionsToClass{twoside,onecolumn}{article} +\else + \PassOptionsToClass{twoside,twocolumn}{article} +\fi +\LoadClass{article} + +\def \@unsupportedoption #1{% + \ClassError{proc}{The standard '#1' option is not supported.}} + +% This can be used with the 'reprint' option to get the final folios. + +\def \setpagenumber #1{% + \setcounter{page}{#1}} + +\AtEndDocument{\label{sigplanconf at finalpage}} + +% Utilities +% --------- + + +\newcommand{\setvspace}[2]{% + #1 = #2 + \advance #1 by -1\parskip} + +% Document Parameters +% -------- ---------- + + +% Page: + +\setlength{\hoffset}{-1in} +\setlength{\voffset}{-1in} + +\setlength{\topmargin}{1in} +\setlength{\headheight}{0pt} +\setlength{\headsep}{0pt} + +\if \@onecolumn + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\else + \setlength{\evensidemargin}{.75in} + \setlength{\oddsidemargin}{.75in} +\fi + +% Text area: + +\newdimen{\standardtextwidth} +\setlength{\standardtextwidth}{42pc} + +\if \@onecolumn + \setlength{\textwidth}{40.5pc} +\else + \setlength{\textwidth}{\standardtextwidth} +\fi + +\setlength{\topskip}{8pt} +\setlength{\columnsep}{2pc} +\setlength{\textheight}{54.5pc} + +% Running foot: + +\setlength{\footskip}{30pt} + +% Paragraphs: + +\if \@blockstyle + \setlength{\parskip}{5pt plus .1pt minus .5pt} + \setlength{\parindent}{0pt} +\else + \setlength{\parskip}{0pt} + \setlength{\parindent}{12pt} +\fi + +\setlength{\lineskip}{.5pt} +\setlength{\lineskiplimit}{\lineskip} + +\frenchspacing +\pretolerance = 400 +\tolerance = \pretolerance +\setlength{\emergencystretch}{5pt} +\clubpenalty = 10000 +\widowpenalty = 10000 +\setlength{\hfuzz}{.5pt} + +% Standard vertical spaces: + 
+\newskip{\standardvspace} +\setvspace{\standardvspace}{5pt plus 1pt minus .5pt} + +% Margin paragraphs: + +\setlength{\marginparwidth}{36pt} +\setlength{\marginparsep}{2pt} +\setlength{\marginparpush}{8pt} + + +\setlength{\skip\footins}{8pt plus 3pt minus 1pt} +\setlength{\footnotesep}{9pt} + +\renewcommand{\footnoterule}{% + \hrule width .5\columnwidth height .33pt depth 0pt} + +\renewcommand{\@makefntext}[1]{% + \noindent \@makefnmark \hspace{1pt}#1} + +% Floats: + +\setcounter{topnumber}{4} +\setcounter{bottomnumber}{1} +\setcounter{totalnumber}{4} + +\renewcommand{\fps at figure}{tp} +\renewcommand{\fps at table}{tp} +\renewcommand{\topfraction}{0.90} +\renewcommand{\bottomfraction}{0.30} +\renewcommand{\textfraction}{0.10} +\renewcommand{\floatpagefraction}{0.75} + +\setcounter{dbltopnumber}{4} + +\renewcommand{\dbltopfraction}{\topfraction} +\renewcommand{\dblfloatpagefraction}{\floatpagefraction} + +\setlength{\floatsep}{18pt plus 4pt minus 2pt} +\setlength{\textfloatsep}{18pt plus 4pt minus 3pt} +\setlength{\intextsep}{10pt plus 4pt minus 3pt} + +\setlength{\dblfloatsep}{18pt plus 4pt minus 2pt} +\setlength{\dbltextfloatsep}{20pt plus 4pt minus 3pt} + +% Miscellaneous: + +\errorcontextlines = 5 + +% Fonts +% ----- + + +\if \@times + \renewcommand{\rmdefault}{ptm}% + \if \@mathtime + \usepackage[mtbold,noTS1]{mathtime}% + \else +%%% \usepackage{mathptm}% + \fi +\else + \relax +\fi + +\if \@ninepoint + +\renewcommand{\normalsize}{% + \@setfontsize{\normalsize}{9pt}{10pt}% + \setlength{\abovedisplayskip}{5pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{3pt plus 1pt minus 2pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\tiny}{\@setfontsize{\tiny}{5pt}{6pt}} + +\renewcommand{\scriptsize}{\@setfontsize{\scriptsize}{7pt}{8pt}} + +\renewcommand{\small}{% + \@setfontsize{\small}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus 1pt}% + 
\setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\footnotesize}{% + \@setfontsize{\footnotesize}{8pt}{9pt}% + \setlength{\abovedisplayskip}{4pt plus 1pt minus .5pt}% + \setlength{\belowdisplayskip}{\abovedisplayskip}% + \setlength{\abovedisplayshortskip}{2pt plus 1pt}% + \setlength{\belowdisplayshortskip}{\abovedisplayshortskip}} + +\renewcommand{\large}{\@setfontsize{\large}{11pt}{13pt}} + +\renewcommand{\Large}{\@setfontsize{\Large}{14pt}{18pt}} + +\renewcommand{\LARGE}{\@setfontsize{\LARGE}{18pt}{20pt}} + +\renewcommand{\huge}{\@setfontsize{\huge}{20pt}{25pt}} + +\renewcommand{\Huge}{\@setfontsize{\Huge}{25pt}{30pt}} + +\else\if \@tenpoint + +\relax + +\else + +\relax + +\fi\fi + +% Abstract +% -------- + + +\renewenvironment{abstract}{% + \section*{Abstract}% + \normalsize}{% + } + +% Bibliography +% ------------ + + +\renewenvironment{thebibliography}[1] + {\section*{\refname + \@mkboth{\MakeUppercase\refname}{\MakeUppercase\refname}}% + \list{\@biblabel{\@arabic\c at enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \@openbib at code + \usecounter{enumiv}% + \let\p at enumiv\@empty + \renewcommand\theenumiv{\@arabic\c at enumiv}}% + \bibfont + \clubpenalty4000 + \@clubpenalty \clubpenalty + \widowpenalty4000% + \sfcode`\.\@m} + {\def\@noitemerr + {\@latex at warning{Empty `thebibliography' environment}}% + \endlist} + +\if \@natbib + +\if \@authoryear + \typeout{Using natbib package with 'authoryear' citation style.} + \usepackage[authoryear,square]{natbib} + \bibpunct{(}{)}{;}{a}{}{,} % Change fences to parentheses; + % citation separator to semicolon; + % eliminate comma between author and year. 
+ \let \cite = \citep +\else + \typeout{Using natbib package with 'numbers' citation style.} + \usepackage[numbers,sort&compress,square]{natbib} +\fi +\setlength{\bibsep}{3pt plus .5pt minus .25pt} + +\fi + +\def \bibfont {\small} + +% Categories +% ---------- + + +\@setflag \@firstcategory = \@true + +\newcommand{\category}[3]{% + \if \@firstcategory + \paragraph*{Categories and Subject Descriptors}% + \@setflag \@firstcategory = \@false + \else + \unskip ;\hspace{.75em}% + \fi + \@ifnextchar [{\@category{#1}{#2}{#3}}{\@category{#1}{#2}{#3}[]}} + +\def \@category #1#2#3[#4]{% + {\let \and = \relax + #1 [\textit{#2}]% + \if \@emptyargp{#4}% + \if \@notp{\@emptyargp{#3}}: #3\fi + \else + :\space + \if \@notp{\@emptyargp{#3}}#3---\fi + \textrm{#4}% + \fi}} + +% Copyright Notice +% --------- ------ + + +\def \ftype at copyrightbox {8} +\def \@toappear {} +\def \@permission {} +\def \@reprintprice {} + +\def \@copyrightspace {% + \@float{copyrightbox}[b]% + \vbox to 1.2in{% + \vfill + \parbox[b]{20pc}{% + \scriptsize + \if \@preprint + [Copyright notice will appear here + once 'preprint' option is removed.]\par + \else + \@toappear + \fi + \if \@reprint + \noindent Reprinted from \@conferencename, + \@proceedings, + \@conferenceinfo, + pp.~\number\thepage--\pageref{sigplanconf at finalpage}.\par + \fi}}% + \end at float} + +\newcommand{\reprintprice}[1]{% + \gdef \@reprintprice {#1}} + +\reprintprice{\$15.00} + +\long\def \toappear #1{% + \def \@toappear {#1}} + +\toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. 
\par + \noindent Copyright \copyright\ \@copyrightyear\ ACM \@copyrightdata + \dots \@reprintprice.\par + \noindent http://dx.doi.org/10.1145/\@doi } + +\newcommand{\permission}[1]{% + \gdef \@permission {#1}} + +\permission{% + Permission to make digital or hard copies of all or part of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + components of this work owned by others than ACM must be honored. + Abstracting with credit is permitted. To copy otherwise, or republish, to + post on servers or to redistribute to lists, requires prior specific + permission and/or a fee. Request permissions from permissions at acm.org.} + +% These are two new rights management and bibstrip text blocks. + +\newcommand{\exclusivelicense}{% + \permission{% + Permission to make digital or hard copies of all or part of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + components of this work owned by others than the author(s) must be honored. + Abstracting with credit is permitted. To copy otherwise, or republish, to + post on servers or to redistribute to lists, requires prior specific + permission and/or a fee. Request permissions from permissions at acm.org.} + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent Copyright is held by the owner/author(s). Publication rights licensed to ACM. 
\par + \noindent ACM \@copyrightdata \dots \@reprintprice.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +\newcommand{\permissiontopublish}{% + \permission{% + Permission to make digital or hard copies of part or all of this work for + personal or classroom use is granted without fee provided that copies are + not made or distributed for profit or commercial advantage and that copies + bear this notice and the full citation on the first page. Copyrights for + third-party components of this work must be honored. + For all other uses, contact the owner/author(s).}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent Copyright is held by the owner/author(s). \par + \noindent ACM \@copyrightdata.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +% The following permission notices are +% for the traditional copyright transfer agreement option. + +% Exclusive license and permission-to-publish +% give more complicated permission notices. +% These are not covered here. + +\newcommand{\ACMCanadapermission}{% + \permission{% + ACM acknowledges that this contribution was authored or + co-authored by an affiliate of the Canadian National + Government. As such, the Crown in Right of Canada retains an equal + interest in the copyright. Reprint requests should be forwarded to + ACM.}} + +\newcommand{\ACMUSpermission}{% + \permission{% + ACM acknowledges that this contribution was authored or + co-authored by a contractor or affiliate of the United States + Government. As such, the United States Government retains a + nonexclusive, royalty-free right to publish or reproduce this + article, or to allow others to do so, for Government purposes + only.}} + +\newcommand{\USpublicpermission}{% + \permission{% + This paper is authored by an employee(s) of the United States + Government and is in the public domain. 
Non-exclusive copying or + redistribution is allowed, provided that the article citation is + given and the authors and the agency are clearly identified as its + source.}% + \toappear{% + \noindent \@permission \par + \vspace{2pt} + \noindent \textsl{\@conferencename}, \quad \@conferenceinfo. \par + \noindent ACM \@copyrightdata.\par + \noindent http://dx.doi.org/10.1145/\@doi}} + +\newcommand{\authorversion}[4]{% + \permission{% + Copyright \copyright\ ACM, #1. This is the author's version of the work. + It is posted here by permission of ACM for your personal use. + Not for redistribution. The definitive version was published in + #2, #3, http://dx.doi.org/10.1145/#4.}} + +% Enunciations +% ------------ + + +\def \@begintheorem #1#2{% {name}{number} + \trivlist + \item[\hskip \labelsep \textsc{#1 #2.}]% + \itshape\selectfont + \ignorespaces} + +\def \@opargbegintheorem #1#2#3{% {name}{number}{title} + \trivlist + \item[% + \hskip\labelsep \textsc{#1\ #2}% + \if \@notp{\@emptyargp{#3}}\nut (#3).\fi]% + \itshape\selectfont + \ignorespaces} + +% Figures +% ------- + + +\@setflag \@caprule = \@true + +\long\def \@makecaption #1#2{% + \addvspace{4pt} + \if \@caprule + \hrule width \hsize height .33pt + \vspace{4pt} + \fi + \setbox \@tempboxa = \hbox{\@setfigurenumber{#1.}\nut #2}% + \if \@dimgtrp{\wd\@tempboxa}{\hsize}% + \noindent \@setfigurenumber{#1.}\nut #2\par + \else + \centerline{\box\@tempboxa}% + \fi} + +\newcommand{\nocaptionrule}{% + \@setflag \@caprule = \@false} + +\def \@setfigurenumber #1{% + {\rmfamily \bfseries \selectfont #1}} + +% Hierarchy +% --------- + + +\setcounter{secnumdepth}{\@numheaddepth} + +\newskip{\@sectionaboveskip} +\setvspace{\@sectionaboveskip}{10pt plus 3pt minus 2pt} + +\newskip{\@sectionbelowskip} +\if \@blockstyle + \setlength{\@sectionbelowskip}{0.1pt}% +\else + \setlength{\@sectionbelowskip}{4pt}% +\fi + +\renewcommand{\section}{% + \@startsection + {section}% + {1}% + {0pt}% + {-\@sectionaboveskip}% + {\@sectionbelowskip}% + 
{\large \bfseries \raggedright}} + +\newskip{\@subsectionaboveskip} +\setvspace{\@subsectionaboveskip}{8pt plus 2pt minus 2pt} + +\newskip{\@subsectionbelowskip} +\if \@blockstyle + \setlength{\@subsectionbelowskip}{0.1pt}% +\else + \setlength{\@subsectionbelowskip}{4pt}% +\fi + +\renewcommand{\subsection}{% + \@startsection% + {subsection}% + {2}% + {0pt}% + {-\@subsectionaboveskip}% + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\renewcommand{\subsubsection}{% + \@startsection% + {subsubsection}% + {3}% + {0pt}% + {-\@subsectionaboveskip} + {\@subsectionbelowskip}% + {\normalsize \bfseries \raggedright}} + +\newskip{\@paragraphaboveskip} +\setvspace{\@paragraphaboveskip}{6pt plus 2pt minus 2pt} + +\renewcommand{\paragraph}{% + \@startsection% + {paragraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \bfseries \if \@times \itshape \fi}} + +\renewcommand{\subparagraph}{% + \@startsection% + {subparagraph}% + {4}% + {0pt}% + {\@paragraphaboveskip} + {-1em}% + {\normalsize \itshape}} + +% Standard headings: + +\newcommand{\acks}{\section*{Acknowledgments}} + +\newcommand{\keywords}{\paragraph*{Keywords}} + +\newcommand{\terms}{\paragraph*{General Terms}} + +% Identification +% -------------- + + +\def \@conferencename {} +\def \@conferenceinfo {} +\def \@copyrightyear {} +\def \@copyrightdata {[to be supplied]} +\def \@proceedings {[Unknown Proceedings]} + + +\newcommand{\conferenceinfo}[2]{% + \gdef \@conferencename {#1}% + \gdef \@conferenceinfo {#2}} + +\newcommand{\copyrightyear}[1]{% + \gdef \@copyrightyear {#1}} + +\let \CopyrightYear = \copyrightyear + +\newcommand{\copyrightdata}[1]{% + \gdef \@copyrightdata {#1}} + +\let \crdata = \copyrightdata + +\newcommand{\doi}[1]{% + \gdef \@doi {#1}} + +\newcommand{\proceedings}[1]{% + \gdef \@proceedings {#1}} + +% Lists +% ----- + + +\setlength{\leftmargini}{13pt} +\setlength\leftmarginii{13pt} +\setlength\leftmarginiii{13pt} +\setlength\leftmarginiv{13pt} 
+\setlength{\labelsep}{3.5pt} + +\setlength{\topsep}{\standardvspace} +\if \@blockstyle + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\else + \setlength{\itemsep}{1pt} + \setlength{\parsep}{3pt} +\fi + +\renewcommand{\labelitemi}{{\small \centeroncapheight{\textbullet}}} +\renewcommand{\labelitemii}{\centeroncapheight{\rule{2.5pt}{2.5pt}}} +\renewcommand{\labelitemiii}{$-$} +\renewcommand{\labelitemiv}{{\Large \textperiodcentered}} + +\renewcommand{\@listi}{% + \leftmargin = \leftmargini + \listparindent = 0pt} +%%% \itemsep = 1pt +%%% \parsep = 3pt} +%%% \listparindent = \parindent} + +\let \@listI = \@listi + +\renewcommand{\@listii}{% + \leftmargin = \leftmarginii + \topsep = 1pt + \labelwidth = \leftmarginii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiii}{% + \leftmargin = \leftmarginiii + \labelwidth = \leftmarginiii + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +\renewcommand{\@listiv}{% + \leftmargin = \leftmarginiv + \labelwidth = \leftmarginiv + \advance \labelwidth by -\labelsep + \listparindent = \parindent} + +% Mathematics +% ----------- + + +\def \theequation {\arabic{equation}} + +% Miscellaneous +% ------------- + + +\newcommand{\balancecolumns}{% + \vfill\eject + \global\@colht = \textheight + \global\ht\@cclv = \textheight} + +\newcommand{\nut}{\hspace{.5em}} + +\newcommand{\softraggedright}{% + \let \\ = \@centercr + \leftskip = 0pt + \rightskip = 0pt plus 10pt} + +% Program Code +% ------- ---- + + +\newcommand{\mono}[1]{% + {\@tempdima = \fontdimen2\font + \texttt{\spaceskip = 1.1\@tempdima #1}}} + +% Running Heads and Feet +% ------- ----- --- ---- + + +\def \@preprintfooter {} + +\newcommand{\preprintfooter}[1]{% + \gdef \@preprintfooter {#1}} + +\if \@preprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize + \rlap{\textit{\@preprintfooter}}\hfil + \thepage \hfil + \llap{\textit{\@formatyear}}}% + 
\let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else\if \@reprint + +\def \ps at plain {% + \let \@mkboth = \@gobbletwo + \let \@evenhead = \@empty + \def \@evenfoot {\scriptsize \hfil \thepage \hfil}% + \let \@oddhead = \@empty + \let \@oddfoot = \@evenfoot} + +\else + +\let \ps at plain = \ps at empty +\let \ps at headings = \ps at empty +\let \ps at myheadings = \ps at empty + +\fi\fi + +\def \@formatyear {% + \number\year/\number\month/\number\day} + +% Special Characters +% ------- ---------- + + +\DeclareRobustCommand{\euro}{% + \protect{\rlap{=}}{\sf \kern .1em C}} + +% Title Page +% ----- ---- + + +\@setflag \@addauthorsdone = \@false + +\def \@titletext {\@latex at error{No title was provided}{}} +\def \@subtitletext {} + +\newcount{\@authorcount} + +\newcount{\@titlenotecount} +\newtoks{\@titlenotetext} + +\def \@titlebanner {} + +\renewcommand{\title}[1]{% + \gdef \@titletext {#1}} + +\newcommand{\subtitle}[1]{% + \gdef \@subtitletext {#1}} + +\newcommand{\authorinfo}[3]{% {names}{affiliation}{email/URL} + \global\@increment \@authorcount + \@withname\gdef {\@authorname\romannumeral\@authorcount}{#1}% + \@withname\gdef {\@authoraffil\romannumeral\@authorcount}{#2}% + \@withname\gdef {\@authoremail\romannumeral\@authorcount}{#3}} + +\renewcommand{\author}[1]{% + \@latex at error{The \string\author\space command is obsolete; + use \string\authorinfo}{}} + +\newcommand{\titlebanner}[1]{% + \gdef \@titlebanner {#1}} + +\renewcommand{\maketitle}{% + \pagestyle{plain}% + \if \@onecolumn + {\hsize = \standardtextwidth + \@maketitle}% + \else + \twocolumn[\@maketitle]% + \fi + \@placetitlenotes + \if \@copyrightwanted \@copyrightspace \fi} + +\def \@maketitle {% + \begin{center} + \@settitlebanner + \let \thanks = \titlenote + {\leftskip = 0pt plus 0.25\linewidth + \rightskip = 0pt plus 0.25 \linewidth + \parfillskip = 0pt + \spaceskip = .7em + \noindent \LARGE \bfseries \@titletext \par} + \vskip 6pt + \noindent \Large \@subtitletext \par + \vskip 
12pt + \ifcase \@authorcount + \@latex at error{No authors were specified for this paper}{}\or + \@titleauthors{i}{}{}\or + \@titleauthors{i}{ii}{}\or + \@titleauthors{i}{ii}{iii}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{}\or + \@titleauthors{i}{ii}{iii}\@titleauthors{iv}{v}{vi}% + \@titleauthors{vii}{viii}{ix}\@titleauthors{x}{xi}{xii}% + \else + \@latex at error{Cannot handle more than 12 authors}{}% + \fi + \vspace{1.75pc} + \end{center}} + +\def \@settitlebanner {% + \if \@andp{\@preprint}{\@notp{\@emptydefp{\@titlebanner}}}% + \vbox to 0pt{% + \vskip -32pt + \noindent \textbf{\@titlebanner}\par + \vss}% + \nointerlineskip + \fi} + +\def \@titleauthors #1#2#3{% + \if \@andp{\@emptyargp{#2}}{\@emptyargp{#3}}% + \noindent \@setauthor{40pc}{#1}{\@false}\par + \else\if \@emptyargp{#3}% + \noindent \@setauthor{17pc}{#1}{\@false}\hspace{3pc}% + \@setauthor{17pc}{#2}{\@false}\par + \else + \noindent \@setauthor{12.5pc}{#1}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#2}{\@false}\hspace{2pc}% + \@setauthor{12.5pc}{#3}{\@true}\par + \relax + \fi\fi + \vspace{20pt}} + +\def \@setauthor #1#2#3{% {width}{text}{unused} + \vtop{% + \def \and {% + \hspace{16pt}} + \hsize = #1 + \normalfont + \centering + \large \@name{\@authorname#2}\par + \vspace{5pt} + \normalsize \@name{\@authoraffil#2}\par + \vspace{2pt} + \textsf{\@name{\@authoremail#2}}\par}} + +\def \@maybetitlenote #1{% 
+ \if \@andp{#1}{\@gtrp{\@authorcount}{3}}% + \titlenote{See page~\pageref{@addauthors} for additional authors.}% + \fi} + +\newtoks{\@fnmark} + +\newcommand{\titlenote}[1]{% + \global\@increment \@titlenotecount + \ifcase \@titlenotecount \relax \or + \@fnmark = {\ast}\or + \@fnmark = {\dagger}\or + \@fnmark = {\ddagger}\or + \@fnmark = {\S}\or + \@fnmark = {\P}\or + \@fnmark = {\ast\ast}% + \fi + \,$^{\the\@fnmark}$% + \edef \reserved at a {\noexpand\@appendtotext{% + \noexpand\@titlefootnote{\the\@fnmark}}}% + \reserved at a{#1}} + +\def \@appendtotext #1#2{% + \global\@titlenotetext = \expandafter{\the\@titlenotetext #1{#2}}} + +\newcount{\@authori} + +\iffalse +\def \additionalauthors {% + \if \@gtrp{\@authorcount}{3}% + \section{Additional Authors}% + \label{@addauthors}% + \noindent + \@authori = 4 + {\let \\ = ,% + \loop + \textbf{\@name{\@authorname\romannumeral\@authori}}, + \@name{\@authoraffil\romannumeral\@authori}, + email: \@name{\@authoremail\romannumeral\@authori}.% + \@increment \@authori + \if \@notp{\@gtrp{\@authori}{\@authorcount}} \repeat}% + \par + \fi + \global\@setflag \@addauthorsdone = \@true} +\fi + +\let \addauthorsection = \additionalauthors + +\def \@placetitlenotes { + \the\@titlenotetext} + +% Utilities +% --------- + + +\newcommand{\centeroncapheight}[1]{% + {\setbox\@tempboxa = \hbox{#1}% + \@measurecapheight{\@tempdima}% % Calculate ht(CAP) - ht(text) + \advance \@tempdima by -\ht\@tempboxa % ------------------ + \divide \@tempdima by 2 % 2 + \raise \@tempdima \box\@tempboxa}} + +\newbox{\@measbox} + +\def \@measurecapheight #1{% {\dimen} + \setbox\@measbox = \hbox{ABCDEFGHIJKLMNOPQRSTUVWXYZ}% + #1 = \ht\@measbox} + +\long\def \@titlefootnote #1#2{% + \insert\footins{% + \reset at font\footnotesize + \interlinepenalty\interfootnotelinepenalty + \splittopskip\footnotesep + \splitmaxdepth \dp\strutbox \floatingpenalty \@MM + \hsize\columnwidth \@parboxrestore +%%% \protected at edef\@currentlabel{% +%%% \csname p at 
footnote\endcsname\@thefnmark}% + \color at begingroup + \def \@makefnmark {$^{#1}$}% + \@makefntext{% + \rule\z@\footnotesep\ignorespaces#2\@finalstrut\strutbox}% + \color at endgroup}} + +% LaTeX Modifications +% ----- ------------- + +\def \@seccntformat #1{% + \@name{\the#1}% + \@expandaftertwice\@seccntformata \csname the#1\endcsname.\@mark + \quad} + +\def \@seccntformata #1.#2\@mark{% + \if \@emptyargp{#2}.\fi} + +% Revision History +% -------- ------- + + +% Date Person Ver. Change +% ---- ------ ---- ------ + +% 2004.09.12 PCA 0.1--4 Preliminary development. + +% 2004.11.18 PCA 0.5 Start beta testing. + +% 2004.11.19 PCA 0.6 Obsolete \author and replace with +% \authorinfo. +% Add 'nocopyrightspace' option. +% Compress article opener spacing. +% Add 'mathtime' option. +% Increase text height by 6 points. + +% 2004.11.28 PCA 0.7 Add 'cm/computermodern' options. +% Change default to Times text. + +% 2004.12.14 PCA 0.8 Remove use of mathptm.sty; it cannot +% coexist with latexsym or amssymb. + +% 2005.01.20 PCA 0.9 Rename class file to sigplanconf.cls. + +% 2005.03.05 PCA 0.91 Change default copyright data. + +% 2005.03.06 PCA 0.92 Add at-signs to some macro names. + +% 2005.03.07 PCA 0.93 The 'onecolumn' option defaults to '11pt', +% and it uses the full type width. + +% 2005.03.15 PCA 0.94 Add at-signs to more macro names. +% Allow margin paragraphs during review. + +% 2005.03.22 PCA 0.95 Implement \euro. +% Remove proof and newdef environments. + +% 2005.05.06 PCA 1.0 Eliminate 'onecolumn' option. +% Change footer to small italic and eliminate +% left portion if no \preprintfooter. +% Eliminate copyright notice if preprint. +% Clean up and shrink copyright box. + +% 2005.05.30 PCA 1.1 Add alternate permission statements. + +% 2005.06.29 PCA 1.1 Publish final first edition of guide. + +% 2005.07.14 PCA 1.2 Add \subparagraph. +% Use block paragraphs in lists, and adjust +% spacing between items and paragraphs. 
+ +% 2006.06.22 PCA 1.3 Add 'reprint' option and associated +% commands. + +% 2006.08.24 PCA 1.4 Fix bug in \maketitle case command. + +% 2007.03.13 PCA 1.5 The title banner only displays with the +% 'preprint' option. + +% 2007.06.06 PCA 1.6 Use \bibfont in \thebibliography. +% Add 'natbib' option to load and configure +% the natbib package. + +% 2007.11.20 PCA 1.7 Balance line lengths in centered article +% title (thanks to Norman Ramsey). + +% 2009.01.26 PCA 1.8 Change natbib \bibpunct values. + +% 2009.03.24 PCA 1.9 Change natbib to use the 'numbers' option. +% Change templates to use 'natbib' option. + +% 2009.09.01 PCA 2.0 Add \reprintprice command (suggested by +% Stephen Chong). + +% 2009.09.08 PCA 2.1 Make 'natbib' the default; add 'nonatbib'. +% SB Add 'authoryear' and 'numbers' (default) to +% control citation style when using natbib. +% Add \bibpunct to change punctuation for +% 'authoryear' style. + +% 2009.09.21 PCA 2.2 Add \softraggedright to the thebibliography +% environment. Also add to template so it will +% happen with natbib. + +% 2009.09.30 PCA 2.3 Remove \softraggedright from thebibliography. +% Just include in the template. + +% 2010.05.24 PCA 2.4 Obfuscate class author's email address. + +% 2011.11.08 PCA 2.5 Add copyright notice to this file. +% Remove 'sort' option from natbib when using +% 'authoryear' style. +% Add the \authorversion command. + +% 2013.02.22 PCA 2.6 Change natbib fences to parentheses when +% using 'authoryear' style. + +% 2013.05.17 PCA 2.7 Change standard and author copyright text. + +% 2013.07.02 TU 2.8 More changes to permission/copyright notes. 
+% Replaced ambiguous \authorpermission with +% \exclusivelicense and \permissiontopublish + + From noreply at buildbot.pypy.org Thu May 15 09:33:34 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 15 May 2014 09:33:34 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: fix spaces/layout Message-ID: <20140515073334.100621C02F3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71526:f9ddc5b3ad29 Date: 2014-05-14 11:40 -0700 http://bitbucket.org/pypy/pypy/changeset/f9ddc5b3ad29/ Log: fix spaces/layout diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -754,7 +754,7 @@ assert not cppyy.gbl.nullptr assert c.s_voidp is cppyy.gbl.nullptr - assert CppyyTestData.s_voidp is cppyy.gbl.nullptr + assert CppyyTestData.s_voidp is cppyy.gbl.nullptr assert c.m_voidp is cppyy.gbl.nullptr assert c.get_voidp() is cppyy.gbl.nullptr From noreply at buildbot.pypy.org Thu May 15 09:33:35 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 15 May 2014 09:33:35 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: make sure module cppyy is initialized before test (to create executors/converters) Message-ID: <20140515073335.520111C02F3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71527:a5007f9743c4 Date: 2014-05-14 23:08 -0700 http://bitbucket.org/pypy/pypy/changeset/a5007f9743c4/ Log: make sure module cppyy is initialized before test (to create executors/converters) diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -15,6 +15,8 @@ raise OSError("'make' failed (see stderr)") class TestCPPYYImplementation: + spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) + def test01_class_query(self, space): # NOTE: this test 
needs to run before test_pythonify.py dct = interp_cppyy.load_dictionary(space, test_dct) From noreply at buildbot.pypy.org Thu May 15 09:33:36 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 15 May 2014 09:33:36 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: enable lazy lookup of templated functions Message-ID: <20140515073336.738AB1C02F3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71528:646b4e87506c Date: 2014-05-14 23:37 -0700 http://bitbucket.org/pypy/pypy/changeset/646b4e87506c/ Log: enable lazy lookup of templated functions diff --git a/pypy/module/cppyy/src/reflexcwrapper.cxx b/pypy/module/cppyy/src/reflexcwrapper.cxx --- a/pypy/module/cppyy/src/reflexcwrapper.cxx +++ b/pypy/module/cppyy/src/reflexcwrapper.cxx @@ -370,9 +370,12 @@ // unsorted std::vector anyway, so there's no gain to be had in using the // Scope::FunctionMemberByName() function int num_meth = s.FunctionMemberSize(); + std::string::size_type name_sz = strlen(name); for (int imeth = 0; imeth < num_meth; ++imeth) { Reflex::Member m = s.FunctionMemberAt(imeth); - if (m.Name() == name) { + const std::string& mName = m.Name(); + if (mName.find(name) == 0 && (mName.size() == name_sz /* exact match */ || + (mName.size() > name_sz && mName[name_sz] == '<') /* template */ )) { if (m.IsPublic()) result.push_back((cppyy_index_t)imeth); } From noreply at buildbot.pypy.org Thu May 15 09:33:37 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 15 May 2014 09:33:37 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: allow customization of __init__ and __del__ in a base class Message-ID: <20140515073337.A6DEB1C02F3@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r71529:a2cbc5204fef Date: 2014-05-15 00:32 -0700 http://bitbucket.org/pypy/pypy/changeset/a2cbc5204fef/ Log: allow customization of __init__ and __del__ in a base class diff --git a/pypy/module/cppyy/interp_cppyy.py 
b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -883,7 +883,6 @@ dname = capi.c_datamember_name(self.space, self, i) if dname: alldir.append(self.space.wrap(dname)) return self.space.newlist(alldir) - W_CPPNamespace.typedef = TypeDef( 'CPPNamespace', @@ -1182,6 +1181,7 @@ __cmp__ = interp2app(W_CPPInstance.instance__cmp__), __repr__ = interp2app(W_CPPInstance.instance__repr__), __destruct__ = interp2app(W_CPPInstance.destruct), + __del__ = interp2app(W_CPPInstance.__del__), ) W_CPPInstance.typedef.acceptable_as_base_class = True @@ -1230,12 +1230,19 @@ state = space.fromcache(State) return space.call_function(state.w_fngen_callback, w_callable, space.wrap(npar)) -def wrap_cppobject(space, rawobject, cppclass, +def wrap_cppobject(space, rawobject, cppclass, w_pycppclass=None, do_cast=True, python_owns=False, is_ref=False, fresh=False): - rawobject = rffi.cast(capi.C_OBJECT, rawobject) + # Wrap a C++ object for use on the python side: + # rawobject : address pointing to the C++ object + # cppclass : rpython-side C++ class proxy + # w_pycppclass : wrapped python class (may be derived, by a developer) + # do_cast : calculate offset between given type and run-time type + # python_owns : sets _python_owns flag (True: will delete C++ on __del__) + # is_ref : True of rawobject is a pointer to the C++ object (i.e. &this) + # fresh : True if newly created object (e.g. 
from constructor) # cast to actual if requested and possible - w_pycppclass = None + rawobject = rffi.cast(capi.C_OBJECT, rawobject) if do_cast and rawobject: actual = capi.c_actual_class(space, cppclass, rawobject) if actual != cppclass.handle: @@ -1251,15 +1258,17 @@ # the variables are re-assigned yet) pass - if w_pycppclass is None: - w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) - # try to recycle existing object if this one is not newly created if not fresh and rawobject: obj = memory_regulator.retrieve(rawobject) if obj is not None and obj.cppclass is cppclass: return obj + # this is slow, so only call if really necessary (it may have been provided or set + # when calculating the casting offset above) + if w_pycppclass is None: + w_pycppclass = get_pythonized_cppclass(space, cppclass.handle) + # fresh creation w_cppinstance = space.allocate_instance(W_CPPInstance, w_pycppclass) cppinstance = space.interp_w(W_CPPInstance, w_cppinstance, can_be_None=False) @@ -1291,11 +1300,13 @@ except Exception: # accept integer value as address rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) + w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) if not w_cppclass: raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) + w_pycppclass = None cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, w_pycppclass, do_cast=cast, python_owns=owns) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -140,12 +140,10 @@ def make_new(class_name): def __new__(cls, *args): - # create a place-holder only as there may be a derived class defined - import cppyy - instance = 
cppyy.bind_object(0, class_name, True) - if not instance.__class__ is cls: - instance.__class__ = cls # happens for derived class - return instance + # create a place-holder (python instance + nullptr) only as there may + # be a derived class defined; __init__ allocates and fills the ptr + import cppyy # lazy + return cppyy.bind_object(0, cls, True) return __new__ def make_pycppclass(scope, class_name, final_class_name, cppclass): @@ -175,7 +173,7 @@ "__new__" : make_new(class_name), } pycppclass = metacpp(class_name, _drop_cycles(bases), d) - + # cache result early so that the class methods can find the class itself setattr(scope, final_class_name, pycppclass) @@ -204,8 +202,7 @@ # needs to run first, so that the generic pythonizations can use them import cppyy cppyy._register_class(pycppclass) - _pythonize(pycppclass) - return pycppclass + return _pythonize(pycppclass) def make_cpptemplatetype(scope, template_name): return CPPTemplate(template_name, scope) @@ -222,14 +219,14 @@ pycppitem = None - # classes + # scopes cppitem = cppyy._scope_byname(true_name) if cppitem: if cppitem.is_namespace(): pycppitem = make_cppnamespace(scope, true_name, cppitem) - setattr(scope, name, pycppitem) else: pycppitem = make_pycppclass(scope, true_name, name, cppitem) + setattr(scope, name, pycppitem) # enums (special case) if not cppitem: @@ -316,14 +313,22 @@ else: return python_style_getitem(self, slice_or_idx) -_pythonizations = {} +_specific_pythonizations = {} +_global_pythonizations = [] def _pythonize(pyclass): try: - _pythonizations[pyclass.__name__](pyclass) + cbs = _specific_pythonizations[pyclass.__name__] + for cb in cbs: + res = cb(pyclass) + if res: pyclass = res except KeyError: pass + for cb in _global_pythonizations: + res = cb(pyclass) + if res: pyclass = res + # general note: use 'in pyclass.__dict__' rather than 'hasattr' to prevent # adding pythonizations multiple times in derived classes @@ -373,7 +378,7 @@ while i != self.end(): yield i.__deref__() 
i.__preinc__() - i.destruct() + i.__destruct__() raise StopIteration pyclass.__iter__ = __iter__ # else: rely on numbered iteration @@ -407,6 +412,8 @@ pyclass.__getitem__ = getitem pyclass.__len__ = return2 + return pyclass + _loaded_dictionaries = {} def load_reflection_info(name): """Takes the name of a library containing reflection info, returns a handle @@ -418,7 +425,7 @@ lib = cppyy._load_dictionary(name) _loaded_dictionaries[name] = lib return lib - + def _init_pythonify(): # cppyy should not be loaded at the module level, as that will trigger a # call to space.getbuiltinmodule(), which will cause cppyy to be loaded @@ -464,11 +471,16 @@ sys.modules['cppyy.gbl.std'] = gbl.std # user-defined pythonizations interface -_pythonizations = {} def add_pythonization(class_name, callback): """Takes a class name and a callback. The callback should take a single argument, the class proxy, and is called the first time the named class is bound.""" if not callable(callback): raise TypeError("given '%s' object is not callable" % str(callback)) - _pythonizations[class_name] = callback + if class_name == '*': + _global_pythonizations.append(callback) + else: + try: + _specific_pythonizations[class_name].append(callback) + except KeyError: + _specific_pythonizations[class_name] = [callback] diff --git a/pypy/module/cppyy/test/example01.h b/pypy/module/cppyy/test/example01.h --- a/pypy/module/cppyy/test/example01.h +++ b/pypy/module/cppyy/test/example01.h @@ -113,3 +113,18 @@ public: example01a(int a) : example01(a) {} }; + +class example01b : public example01 { +public: + example01b(int a) : example01(a) {} +}; + +class example01c : public example01 { +public: + example01c(int a) : example01(a) {} +}; + +class example01d : public example01 { +public: + example01d(int a) : example01(a) {} +}; diff --git a/pypy/module/cppyy/test/example01.xml b/pypy/module/cppyy/test/example01.xml --- a/pypy/module/cppyy/test/example01.xml +++ b/pypy/module/cppyy/test/example01.xml @@ -3,7 
+3,7 @@ - + diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -371,6 +371,40 @@ assert example01.getCount() == 0 + class MyClass3(example01): + def __init__(self, *args): + example01.__init__(self, *args) + + raises(TypeError, MyClass3, 'hi') + o = MyClass3(312) + assert type(o) == MyClass3 + assert example01.getCount() == 1 + assert o.m_somedata == 312 + o.__destruct__() + + assert example01.getCount() == 0 + + class MyClass4(example01): + pycount = 0 + def __init__(self, *args): + example01.__init__(self, *args) + MyClass4.pycount += 1 + def __del__(self): + example01.__del__(self) + MyClass4.pycount -= 1 + + o = MyClass4() + assert type(o) == MyClass4 + assert example01.getCount() == 1 + assert MyClass4.pycount == 1 + del o + + import gc + gc.collect() + + assert MyClass4.pycount == 0 + assert example01.getCount() == 0 + class AppTestPYTHONIFY_UI: spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools']) @@ -386,12 +420,13 @@ import cppyy + # simple pythonization def example01a_pythonize(pyclass): - assert pyclass.__name__ == 'example01a' + import cppyy + assert issubclass(pyclass, cppyy.gbl.example01) def getitem(self, idx): return self.addDataToInt(idx) pyclass.__getitem__ = getitem - cppyy.add_pythonization('example01a', example01a_pythonize) e = cppyy.gbl.example01a(1) @@ -400,6 +435,79 @@ assert e[1] == 2 assert e[5] == 6 + # stacked pythonization + cppyy.add_pythonization('example01b', example01a_pythonize) + def example01b_pythonize(pyclass): + assert pyclass.__name__ == 'example01b' + def _len(self): + return self.m_somedata + pyclass.__len__ = _len + cppyy.add_pythonization('example01b', example01a_pythonize) + cppyy.add_pythonization('example01b', example01b_pythonize) + + e = cppyy.gbl.example01b(42) + + assert e[0] == 42 + assert e[1] == 43 + assert len(e) == 42 + + # class replacement + def 
example01c_pythonize(pyclass): + if pyclass.__name__ == 'example01c': + class custom(pyclass): + pycount = 0 + def __init__(self, *args): + custom.pycount += 1 + pyclass.__init__(self, *args) + def __del__(self): + pyclass.__del__(self) + custom.pycount -= 1 + return custom + cppyy.add_pythonization('*', example01c_pythonize) + + e = cppyy.gbl.example01c(88) + assert type(e) == cppyy.gbl.example01c + assert cppyy.gbl.example01c.getCount() == 1 + assert cppyy.gbl.example01c.pycount == 1 + assert e.m_somedata == 88 + del e + + import gc + gc.collect() + + assert cppyy.gbl.example01c.pycount == 0 + assert cppyy.gbl.example01c.getCount() == 0 + + # alt class replacement + def example01d_pythonize(pyclass): + if pyclass.__name__ == 'example01d': + d = {} + d['pycount'] = 0 + def __init__(self, *args): + self.__class__.pycount += 1 + pyclass.__init__(self, *args) + d['__init__'] = __init__ + def __del__(self, *args): + self.__class__.pycount -= 1 + pyclass.__del__(self) + d['__del__'] = __del__ + return pyclass.__class__('_'+pyclass.__name__, (pyclass,), d) + cppyy.add_pythonization('*', example01d_pythonize) + + e = cppyy.gbl.example01d(101) + assert type(e) == cppyy.gbl.example01d + assert cppyy.gbl.example01d.getCount() == 1 + assert cppyy.gbl.example01d.pycount == 1 + assert e.m_somedata == 101 + del e + + import gc + gc.collect() + + assert cppyy.gbl.example01d.pycount == 0 + assert cppyy.gbl.example01d.getCount() == 0 + + def test02_fragile_pythonizations(self): """Test pythonizations error reporting""" From noreply at buildbot.pypy.org Thu May 15 12:39:06 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 15 May 2014 12:39:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some tweaks and several notes Message-ID: <20140515103906.CF8911C0190@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5249:6f0c28385e2e Date: 2014-05-15 12:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/6f0c28385e2e/ Log: some tweaks 
and several notes diff --git a/talk/dls2014/report/report.tex b/talk/dls2014/report/report.tex --- a/talk/dls2014/report/report.tex +++ b/talk/dls2014/report/report.tex @@ -38,6 +38,7 @@ }% } \newcommand\remi[1]{\mynote{Remi}{#1}} +\newcommand\cfbolz[1]{\mynote{cfbolz}{#1}} % Title. % ------ @@ -145,6 +146,8 @@ mechanism that avoids several of the problems of locks as they are used now. +\cfbolz{the above is good, here is something missing: problems with current STM approaches, outlining the intuition behind the new one} + Our contributions include: \begin{itemize}[noitemsep] \item We introduce a new software transactional memory (STM) system @@ -177,13 +180,16 @@ If we start multiple such transactions in multiple threads, the TM system guarantees that the outcome of running the transactions is \emph{serializable}. Meaning, the outcome is equal to some sequential -execution of these transactions. Overall, this is exactly what a -single global lock guarantees while still allowing the TM system to +execution of these transactions. This means that the approach provides the same +semantics as using the GIL +while still allowing the TM system to run transactions in parallel as an optimization. \subsection{Python} +\cfbolz{a pypy introduction needs to go somewhere, a paragraph or so. maybe in the evaluation section} + We implement and evaluate our system for the Python language. For the actual implementation, we chose the PyPy interpreter because replacing the GIL there with a TM system is just a matter of adding a new @@ -219,10 +225,12 @@ \subsection{Synchronization} + cfbolz{citation again needed for the whole subsection} + It is well known that using locks to synchronize multiple threads is hard. They are non-composable, have overhead, may deadlock, limit scalability, and overall add a lot of complexity. 
For a better -parallel programming model for dynamic languages, we want to add +parallel programming model for dynamic languages, we want to implement another, well-known synchronization mechanism: \emph{atomic blocks}. Atomic blocks are composable, deadlock-free, higher-level and expose @@ -245,9 +253,13 @@ should clarify the general semantics using commonly used terms from the literature. +\cfbolz{there is an overview paragraph of the idea missing, maybe in the introduction} + +\cfbolz{this all feels very much dumping details, needs more overview. why is this info important? the subsubsections don't have any connections} \subsubsection{Conflict Handling} + Our conflict detection works with \emph{object granularity}. Conceptually, it is based on \emph{read} and \emph{write sets} of transactions. Two transactions conflict if they @@ -276,6 +288,8 @@ the isolation provides full \emph{opacity} to always guarantee a consistent read set. +\cfbolz{this paragraph is hard to understand without giving an example (eg console printing) when it is useful} + We support the notion of \emph{inevitable transactions} that are always guaranteed to commit. There is always at most one such transaction running in the system. We use this kind of transaction to provide @@ -321,7 +335,7 @@ threads. To get references to objects that are valid in all threads, we will -use the object's offset inside the segment. Since all segments are +use \cfbolz{use for what?} the object's offset inside the segment. Since all segments are copies of each other, the \emph{Segment Offset (SO)} will point to the private version of an object in all threads/segments. To then translate this SO to a real virtual memory address when used inside a @@ -329,6 +343,8 @@ SO. The result of this operation is called a \emph{Linear Address (LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. 
+\cfbolz{here it needs to say that this is x86 specific} + To make this address translation efficient, we use the segment register $\%gs$. When this register points to a thread's segment start address, we can instruct the CPU to perform the above translation from @@ -444,7 +460,7 @@ \item [{Read~Barrier:}] Adds the object to the read set of the current transaction. Since our two-step address translation automatically resolves the reference to the private version of the object on every - access anyway, this is not the job of the read barrier anymore. + access anyway, the read barrier does not need to do address translation anymore. \item [{Write~Barrier:}] Adds the object to the read and write set of the current transaction and checks if all pages of the object are private, doing COW otherwise.\\ @@ -461,7 +477,7 @@ \subsubsection{Atomicity: Commit \& Abort} -To provide atomicity for a transaction, we want to make changes +To provide atomicity for a transaction, we want to make changes globally visible on commit. We also need to be able to completely abort a transaction without a trace, like it never happened. \begin{description} @@ -473,13 +489,15 @@ transaction waiting or aborting.\\ We then push all changes of modified objects in private pages to all the pages in other segments, including the sharing-segment (segment - 0). + 0). \cfbolz{can it really happen that you push pages to other segments? I thought it's always just back to the sharing segment} \item [{Abort:}] On abort the transaction will forget about all the changes it has done. All objects in the write set are reset by copying their previous version from the sharing-segment into the private pages of the aborting transaction. + \cfbolz{why doing any copying? aren't the pages re-shared instead?} \end{description} +\cfbolz{random question: did we investigate the extra memory requirements? 
we should characterize memory overhead somewhere, eg at least one byte per object for the read markers} \subsubsection{Summary} @@ -583,7 +601,7 @@ first generation. \item [{Old~object~space:}] These pages are the ones that are really shared between segments. They mostly contain old objects but also - some young ones that were too big to allocate in the nursery. + some young ones that were too big to be allocated in the nursery. \end{description} @@ -613,8 +631,7 @@ Therefore, a thread may be assigned to different segments each time it starts a transaction. Although, we try to assign it the same segment -again if possible. And a maximum of $N$ transactions may run in -parallel. +again if possible. @@ -624,6 +641,8 @@ Garbage collection plays a big role in our TM system. The GC is generational and has two generations. +\cfbolz{maybe use "young" and "old" generation, if there are only two} + The \textbf{first generation}, where objects are considered to be \emph{young} and reside in the \emph{Nursery}, is collected by \emph{minor collections}. These collections move the surviving objects @@ -667,7 +686,7 @@ The point of the read barrier is to add the object to the read set of the transaction. This information is needed to detect conflicts -between transactions. Usually, it also resolves an object reference to +between transactions. In other STM systems, it also resolves an object reference to a private copy, but since the CPU performs our address translation on every object access efficiently, we do not need to do that in our barrier. @@ -797,7 +816,7 @@ set (\lstinline!modified_old_objects!) and check the corresponding \lstinline!read_markers! in other threads/segments. If we detect a read-write conflict, we do contention management to either abort us or -the other transaction, or to simply wait a bit. +the other transaction, or to simply wait a bit. 
\cfbolz{why does waiting help?} After verifying that there are no conflicts anymore, we copy all our changes done to the objects in the write set to all other segments, From noreply at buildbot.pypy.org Thu May 15 12:40:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 15 May 2014 12:40:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Updated jit tests. No real changes. Message-ID: <20140515104032.D0AC71C0190@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r828:17200555aaec Date: 2014-05-14 15:05 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/17200555aaec/ Log: Updated jit tests. No real changes. diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -8,38 +8,37 @@ 0 to: 1000000000 do: [:t|nil]. """) self.assert_matches(traces[0].loop, """ - i59 = int_le(i51, 1000000000), - guard_true(i59, descr=), - i60 = int_add(i51, 1), - i61 = int_sub(i55, 1), - setfield_gc(ConstPtr(ptr52), i61, descr=), - i62 = int_le(i61, 0), - guard_false(i62, descr=), - jump(p0, p3, i60, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i61, descr=TargetToken(53667152)) + i58 = int_le(i50, 1000000000), + guard_true(i58, descr=), + i59 = int_add(i50, 1), + i60 = int_sub(i54, 1), + setfield_gc(ConstPtr(ptr51), i60, descr=), + i61 = int_le(i60, 0), + guard_false(i61, descr=), + jump(p0, i1, p3, i59, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i60, descr=TargetToken(49337584)) """) self.assert_matches(traces[0].bridges[0], """ - - f18 = call(ConstClass(ll_time.ll_time_time), descr=), - setfield_gc(ConstPtr(ptr19), 10000, descr=), - guard_no_exception(descr=), - f22 = float_sub(f18, 1396948969.119000), - f24 = float_mul(f22, 1000.000000), - i25 = cast_float_to_int(f24), - i27 = int_and(i25, 2147483647), - i28 = 
getfield_gc(ConstPtr(ptr19), descr=), - i29 = int_is_zero(i28), - guard_true(i29, descr=), - label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(48874112)), - guard_class(p0, 23085560, descr=), - p31 = getfield_gc(p0, descr=), - p32 = getfield_gc(p31, descr=), - guard_value(p31, ConstPtr(ptr33), descr=), - guard_value(p32, ConstPtr(ptr34), descr=), - i36 = int_le(i16, 1000000000), - guard_true(i36, descr=), - i38 = int_add(i16, 1), - setfield_gc(ConstPtr(ptr19), 9999, descr=), - jump(p0, p1, i38, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(48817488)) + f19 = call(ConstClass(ll_time.ll_time_time), descr=), + setfield_gc(ConstPtr(ptr20), 10000, descr=), + guard_no_exception(descr=), + f23 = float_sub(f19, 1400072025.015000), + f25 = float_mul(f23, 1000.000000), + i26 = cast_float_to_int(f25), + i28 = int_and(i26, 2147483647), + i29 = getfield_gc(ConstPtr(ptr20), descr=), + i30 = int_is_zero(i29), + guard_true(i30, descr=), + label(p0, i1, p2, i17, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, descr=TargetToken(51495504)), + guard_class(p0, 23562720, descr=), + p32 = getfield_gc(p0, descr=), + p33 = getfield_gc(p32, descr=), + guard_value(p32, ConstPtr(ptr34), descr=), + guard_value(p33, ConstPtr(ptr35), descr=), + i37 = int_le(i17, 1000000000), + guard_true(i37, descr=), + i39 = int_add(i17, 1), + setfield_gc(ConstPtr(ptr20), 9999, descr=), + jump(p0, i1, p2, i39, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, 9999, descr=TargetToken(51434784)) """) def test_constant_string(self, spy, tmpdir): @@ -50,16 +49,16 @@ ^ i """) self.assert_matches(traces[0].loop, """ - i77 = int_le(i69, 10000), - guard_true(i77, descr=), - guard_not_invalidated(descr=), - i78 = int_add_ovf(i69, i68), - guard_no_overflow(descr=), - i79 = int_sub(i72, 1), - setfield_gc(ConstPtr(ptr66), i79, descr=), - i80 = int_le(i79, 0), - guard_false(i80, descr=), - jump(p0, p3, i78, 
p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i68, i79, descr=TargetToken(16561632)) + i76 = int_le(i68, 10000), + guard_true(i76, descr=), + guard_not_invalidated(descr=), + i77 = int_add_ovf(i68, i67), + guard_no_overflow(descr=), + i78 = int_sub(i71, 1), + setfield_gc(ConstPtr(ptr65), i78, descr=), + i79 = int_le(i78, 0), + guard_false(i79, descr=), + jump(p0, i1, p3, i77, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i67, i78, descr=TargetToken(52151536)) """) def test_constant_string_equal2(self, spy, tmpdir): @@ -75,16 +74,16 @@ ^ i """) self.assert_matches(traces[0].loop, """ - i79 = int_le(i71, 100000), - guard_true(i79, descr=), - i80 = int_add(i71, 1), - i81 = int_sub(i75, 1), - setfield_gc(ConstPtr(ptr72), i81, descr=), - i82 = int_le(i81, 0), - guard_false(i82, descr=), - i84 = arraylen_gc(p65, descr=), - i85 = arraylen_gc(p67, descr=), - jump(p0, p3, i80, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i81, p65, p67, descr=TargetToken(57534304)) + i78 = int_le(i70, 100000), + guard_true(i78, descr=), + i79 = int_add(i70, 1), + i80 = int_sub(i74, 1), + setfield_gc(ConstPtr(ptr71), i80, descr=), + i81 = int_le(i80, 0), + guard_false(i81, descr=), + i83 = arraylen_gc(p64, descr=), + i84 = arraylen_gc(p66, descr=), + jump(p0, i1, p3, i79, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i80, p64, p66, descr=TargetToken(19461888)) """) def test_constant_string_var_equal(self, spy, tmpdir): @@ -104,14 +103,14 @@ ^ i """) self.assert_matches(traces[0].loop, """ - i72 = int_le(i64, 100000), - guard_true(i72, descr=), - i73 = int_add(i64, 1), - i74 = int_sub(i68, 1), - setfield_gc(ConstPtr(ptr65), i74, descr=), - i75 = int_le(i74, 0), - guard_false(i75, descr=), - jump(p0, p3, i73, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i74, descr=TargetToken(48821968)) + i71 = int_le(i63, 100000), + guard_true(i71, descr=), + i72 = 
int_add(i63, 1), + i73 = int_sub(i67, 1), + setfield_gc(ConstPtr(ptr64), i73, descr=), + i74 = int_le(i73, 0), + guard_false(i74, descr=), + jump(p0, i1, p3, i72, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i73, descr=TargetToken(49338064)) """) def test_bitblt_fillWhite(self, spy, tmpdir): @@ -124,241 +123,241 @@ Display beDisplay. 1 to: 10000 do: [:i | Display fillWhite]. """) self.assert_matches(traces[0].loop, """ - i598 = int_le(2, i153), - guard_false(i598, descr=), - i599 = getfield_gc_pure(p589, descr=), - i600 = int_add_ovf(i599, i162), - guard_no_overflow(descr=), - i601 = getfield_gc_pure(p592, descr=), - i602 = int_add_ovf(i601, i171), - guard_no_overflow(descr=), - i603 = int_add_ovf(i176, 1), - guard_no_overflow(descr=), - i604 = int_sub(i585, 1), - setfield_gc(ConstPtr(ptr177), i604, descr=), - i605 = int_le(i604, 0), - guard_false(i605, descr=), - i606 = int_le(i603, i187), - guard_true(i606, descr=), - guard_not_invalidated(descr=), - i607 = getfield_gc_pure(p364, descr=), - i608 = int_mod(i607, i224), - i609 = int_rshift(i608, 31), - i610 = int_and(i224, i609), - i611 = int_add(i608, i610), - i612 = int_add_ovf(1, i611), - guard_no_overflow(descr=), - i613 = int_ge(i611, 0), - guard_true(i613, descr=), - i614 = int_lt(i611, i224), - guard_true(i614, descr=), - i615 = getarrayitem_gc(p247, i611, descr=), - i616 = uint_lt(i615, 0) - guard_false(i616, descr=) - i617 = uint_lt(i615, 2147483647) - guard_true(i617, descr=) - i618 = int_add_ovf(i607, i256) - guard_no_overflow(descr=) - i619 = int_ge(i615, 0) - guard_true(i619, descr=) - i620 = int_and(i615, i615) - i621 = uint_lt(i620, 2147483647) - guard_true(i621, descr=) - i622 = int_add_ovf(i602, 1) - guard_no_overflow(descr=) - i623 = int_ge(i602, 0) - guard_true(i623, descr=) - i624 = int_lt(i602, i290) - guard_true(i624, descr=) - i625 = getarrayitem_raw(i292, i602, descr=) - i626 = uint_lt(i625, 0) - guard_false(i626, descr=) - i627 = uint_lt(i625, 
2147483647) - guard_true(i627, descr=) - i628 = int_and(i328, i620) - i629 = uint_lt(i628, 2147483647) - guard_true(i629, descr=) - i630 = getarrayitem_raw(i292, i602, descr=) - i631 = uint_lt(i630, 0) - guard_false(i631, descr=) - i632 = uint_lt(i630, 2147483647) - guard_true(i632, descr=) - i633 = int_ge(i630, 0) - guard_true(i633, descr=) - i634 = int_and(i343, i630) - i635 = uint_lt(i634, 2147483647) - guard_true(i635, descr=) - i636 = int_ge(i628, 0) - guard_true(i636, descr=) - i637 = int_or(i628, i634) - i638 = uint_lt(i637, 2147483647) - guard_true(i638, descr=) - setarrayitem_raw(i292, i602, i637, descr=) - i640 = int_lshift(i602, 3) - i641 = int_ge(i640, i290) - guard_false(i641, descr=) - i642 = uint_rshift(i637, i386) - i643 = int_lshift(i637, i373) - i644 = uint_rshift(i643, i386) - i645 = int_lshift(i644, 8) - i646 = int_or(i642, i645) - i647 = int_lshift(i643, i373) - i648 = uint_rshift(i647, i386) - i649 = int_lshift(i648, 16) - i650 = int_or(i646, i649) - i651 = int_lshift(i647, i373) - i652 = uint_rshift(i651, i386) - i653 = int_lshift(i652, 24) - i654 = int_or(i650, i653) - i655 = int_lshift(i651, i373) - setarrayitem_raw(8650752, i640, i654, descr=) - i656 = int_add(i640, 1) - i657 = int_ge(i656, i290) - guard_false(i657, descr=) - i658 = uint_rshift(i655, i386) - i659 = int_lshift(i655, i373) - i660 = uint_rshift(i659, i386) - i661 = int_lshift(i660, 8) - i662 = int_or(i658, i661) - i663 = int_lshift(i659, i373) - i664 = uint_rshift(i663, i386) - i665 = int_lshift(i664, 16) - i666 = int_or(i662, i665) - i667 = int_lshift(i663, i373) - i668 = uint_rshift(i667, i386) - i669 = int_lshift(i668, 24) - i670 = int_or(i666, i669) - i671 = int_lshift(i667, i373) - setarrayitem_raw(8650752, i656, i670, descr=) - i672 = int_add(i656, 1) - i673 = int_ge(i672, i290) - guard_false(i673, descr=) - i674 = uint_rshift(i671, i386) - i675 = int_lshift(i671, i373) - i676 = uint_rshift(i675, i386) - i677 = int_lshift(i676, 8) - i678 = int_or(i674, i677) - i679 = 
int_lshift(i675, i373) - i680 = uint_rshift(i679, i386) - i681 = int_lshift(i680, 16) - i682 = int_or(i678, i681) - i683 = int_lshift(i679, i373) - i684 = uint_rshift(i683, i386) - i685 = int_lshift(i684, 24) - i686 = int_or(i682, i685) - i687 = int_lshift(i683, i373) - setarrayitem_raw(8650752, i672, i686, descr=) - i688 = int_add(i672, 1) - i689 = int_ge(i688, i290) - guard_false(i689, descr=) - i690 = uint_rshift(i687, i386) - i691 = int_lshift(i687, i373) - i692 = uint_rshift(i691, i386) - i693 = int_lshift(i692, 8) - i694 = int_or(i690, i693) - i695 = int_lshift(i691, i373) - i696 = uint_rshift(i695, i386) - i697 = int_lshift(i696, 16) - i698 = int_or(i694, i697) - i699 = int_lshift(i695, i373) - i700 = uint_rshift(i699, i386) - i701 = int_lshift(i700, 24) - i702 = int_or(i698, i701) - i703 = int_lshift(i699, i373) - setarrayitem_raw(8650752, i688, i702, descr=) - i704 = int_add(i688, 1) - i705 = int_ge(i704, i290) - guard_false(i705, descr=) - i706 = uint_rshift(i703, i386) - i707 = int_lshift(i703, i373) - i708 = uint_rshift(i707, i386) - i709 = int_lshift(i708, 8) - i710 = int_or(i706, i709) - i711 = int_lshift(i707, i373) - i712 = uint_rshift(i711, i386) - i713 = int_lshift(i712, 16) - i714 = int_or(i710, i713) - i715 = int_lshift(i711, i373) - i716 = uint_rshift(i715, i386) - i717 = int_lshift(i716, 24) - i718 = int_or(i714, i717) - i719 = int_lshift(i715, i373) - setarrayitem_raw(8650752, i704, i718, descr=) - i720 = int_add(i704, 1) - i721 = int_ge(i720, i290) - guard_false(i721, descr=) - i722 = uint_rshift(i719, i386) - i723 = int_lshift(i719, i373) - i724 = uint_rshift(i723, i386) - i725 = int_lshift(i724, 8) - i726 = int_or(i722, i725) - i727 = int_lshift(i723, i373) - i728 = uint_rshift(i727, i386) - i729 = int_lshift(i728, 16) - i730 = int_or(i726, i729) - i731 = int_lshift(i727, i373) - i732 = uint_rshift(i731, i386) - i733 = int_lshift(i732, 24) - i734 = int_or(i730, i733) - i735 = int_lshift(i731, i373) - setarrayitem_raw(8650752, i720, i734, 
descr=) - i736 = int_add(i720, 1) - i737 = int_ge(i736, i290) - guard_false(i737, descr=) - i738 = uint_rshift(i735, i386) - i739 = int_lshift(i735, i373) - i740 = uint_rshift(i739, i386) - i741 = int_lshift(i740, 8) - i742 = int_or(i738, i741) - i743 = int_lshift(i739, i373) - i744 = uint_rshift(i743, i386) - i745 = int_lshift(i744, 16) - i746 = int_or(i742, i745) - i747 = int_lshift(i743, i373) - i748 = uint_rshift(i747, i386) - i749 = int_lshift(i748, 24) - i750 = int_or(i746, i749) - i751 = int_lshift(i747, i373) - setarrayitem_raw(8650752, i736, i750, descr=) - i752 = int_add(i736, 1) - i753 = int_ge(i752, i290) - guard_false(i753, descr=) - i754 = uint_rshift(i751, i386) - i755 = int_lshift(i751, i373) - i756 = uint_rshift(i755, i386) - i757 = int_lshift(i756, 8) - i758 = int_or(i754, i757) - i759 = int_lshift(i755, i373) - i760 = uint_rshift(i759, i386) - i761 = int_lshift(i760, 16) - i762 = int_or(i758, i761) - i763 = int_lshift(i759, i373) - i764 = uint_rshift(i763, i386) - i765 = int_lshift(i764, 24) - i766 = int_or(i762, i765) - i767 = int_lshift(i763, i373) - setarrayitem_raw(8650752, i752, i766, descr=) - i768 = int_add(i752, 1) - i769 = int_add_ovf(i600, i571) - guard_no_overflow(descr=) - i770 = int_add_ovf(i602, i571) - guard_no_overflow(descr=) - i771 = int_sub(i604, 11) - setfield_gc(ConstPtr(ptr177), i771, descr=) - i772 = int_le(i771, 0) - guard_false(i772, descr=) - p773 = new_with_vtable(23083336) - setfield_gc(p773, i769, descr=) - setarrayitem_gc(p147, 34, p773, descr=) - p774 = new_with_vtable(23083336) - setfield_gc(p774, i770, descr=) - setarrayitem_gc(p147, 35, p774, descr=) - p775 = new_with_vtable(23083336) - setfield_gc(p775, i618, descr=) - setarrayitem_gc(p147, 20, p775, descr=) - i776 = arraylen_gc(p147, descr=) - i777 = arraylen_gc(p581, descr=) - jump(p0, p3, p8, i615, p596, i620, p18, i603, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, 
p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, p149, p773, i162, p158, p774, i171, p167, p147, i771, i187, p184, p190, p775, i224, p200, p247, i256, p254, p263, p142, p281, i290, i292, i328, i343, i386, i373, i571, p569, p596, p581, descr=TargetToken(48932608)) + i596 = int_le(2, i152) + guard_false(i596, descr=) + i597 = getfield_gc_pure(p587, descr=) + i598 = int_add_ovf(i597, i161) + guard_no_overflow(descr=) + i599 = getfield_gc_pure(p590, descr=) + i600 = int_add_ovf(i599, i170) + guard_no_overflow(descr=) + i601 = int_add_ovf(i175, 1) + guard_no_overflow(descr=) + i602 = int_sub(i583, 1) + setfield_gc(ConstPtr(ptr176), i602, descr=) + i603 = int_le(i602, 0) + guard_false(i603, descr=) + i604 = int_le(i601, i186) + guard_true(i604, descr=) + guard_not_invalidated(descr=) + i605 = getfield_gc_pure(p362, descr=) + i606 = int_mod(i605, i223) + i607 = int_rshift(i606, 31) + i608 = int_and(i223, i607) + i609 = int_add(i606, i608) + i610 = int_add_ovf(1, i609) + guard_no_overflow(descr=) + i611 = int_ge(i609, 0) + guard_true(i611, descr=) + i612 = int_lt(i609, i223) + guard_true(i612, descr=) + i613 = getarrayitem_gc(p246, i609, descr=) + i614 = uint_lt(i613, 0) + guard_false(i614, descr=) + i615 = uint_lt(i613, 2147483647) + guard_true(i615, descr=) + i616 = int_add_ovf(i605, i255) + guard_no_overflow(descr=) + i617 = int_ge(i613, 0) + guard_true(i617, descr=) + i618 = int_and(i613, i613) + i619 = uint_lt(i618, 2147483647) + guard_true(i619, descr=) + i620 = int_add_ovf(i600, 1) + guard_no_overflow(descr=) + i621 = int_ge(i600, 0) + guard_true(i621, descr=) + i622 = int_lt(i600, i289) + guard_true(i622, descr=) + i623 = getarrayitem_raw(i291, i600, descr=) + i624 = uint_lt(i623, 0) + guard_false(i624, descr=) + i625 = uint_lt(i623, 2147483647) + guard_true(i625, descr=) + i626 = int_and(i326, i618) + i627 = uint_lt(i626, 2147483647) + guard_true(i627, descr=) + i628 = 
getarrayitem_raw(i291, i600, descr=) + i629 = uint_lt(i628, 0) + guard_false(i629, descr=) + i630 = uint_lt(i628, 2147483647) + guard_true(i630, descr=) + i631 = int_ge(i628, 0) + guard_true(i631, descr=) + i632 = int_and(i341, i628) + i633 = uint_lt(i632, 2147483647) + guard_true(i633, descr=) + i634 = int_ge(i626, 0) + guard_true(i634, descr=) + i635 = int_or(i626, i632) + i636 = uint_lt(i635, 2147483647) + guard_true(i636, descr=) + setarrayitem_raw(i291, i600, i635, descr=) + i638 = int_lshift(i600, 3) + i639 = int_ge(i638, i289) + guard_false(i639, descr=) + i640 = uint_rshift(i635, i384) + i641 = int_lshift(i635, i371) + i642 = uint_rshift(i641, i384) + i643 = int_lshift(i642, 8) + i644 = int_or(i640, i643) + i645 = int_lshift(i641, i371) + i646 = uint_rshift(i645, i384) + i647 = int_lshift(i646, 16) + i648 = int_or(i644, i647) + i649 = int_lshift(i645, i371) + i650 = uint_rshift(i649, i384) + i651 = int_lshift(i650, 24) + i652 = int_or(i648, i651) + i653 = int_lshift(i649, i371) + setarrayitem_raw(18153472, i638, i652, descr=) + i654 = int_add(i638, 1) + i655 = int_ge(i654, i289) + guard_false(i655, descr=) + i656 = uint_rshift(i653, i384) + i657 = int_lshift(i653, i371) + i658 = uint_rshift(i657, i384) + i659 = int_lshift(i658, 8) + i660 = int_or(i656, i659) + i661 = int_lshift(i657, i371) + i662 = uint_rshift(i661, i384) + i663 = int_lshift(i662, 16) + i664 = int_or(i660, i663) + i665 = int_lshift(i661, i371) + i666 = uint_rshift(i665, i384) + i667 = int_lshift(i666, 24) + i668 = int_or(i664, i667) + i669 = int_lshift(i665, i371) + setarrayitem_raw(18153472, i654, i668, descr=) + i670 = int_add(i654, 1) + i671 = int_ge(i670, i289) + guard_false(i671, descr=) + i672 = uint_rshift(i669, i384) + i673 = int_lshift(i669, i371) + i674 = uint_rshift(i673, i384) + i675 = int_lshift(i674, 8) + i676 = int_or(i672, i675) + i677 = int_lshift(i673, i371) + i678 = uint_rshift(i677, i384) + i679 = int_lshift(i678, 16) + i680 = int_or(i676, i679) + i681 = int_lshift(i677, 
i371) + i682 = uint_rshift(i681, i384) + i683 = int_lshift(i682, 24) + i684 = int_or(i680, i683) + i685 = int_lshift(i681, i371) + setarrayitem_raw(18153472, i670, i684, descr=) + i686 = int_add(i670, 1) + i687 = int_ge(i686, i289) + guard_false(i687, descr=) + i688 = uint_rshift(i685, i384) + i689 = int_lshift(i685, i371) + i690 = uint_rshift(i689, i384) + i691 = int_lshift(i690, 8) + i692 = int_or(i688, i691) + i693 = int_lshift(i689, i371) + i694 = uint_rshift(i693, i384) + i695 = int_lshift(i694, 16) + i696 = int_or(i692, i695) + i697 = int_lshift(i693, i371) + i698 = uint_rshift(i697, i384) + i699 = int_lshift(i698, 24) + i700 = int_or(i696, i699) + i701 = int_lshift(i697, i371) + setarrayitem_raw(18153472, i686, i700, descr=) + i702 = int_add(i686, 1) + i703 = int_ge(i702, i289) + guard_false(i703, descr=) + i704 = uint_rshift(i701, i384) + i705 = int_lshift(i701, i371) + i706 = uint_rshift(i705, i384) + i707 = int_lshift(i706, 8) + i708 = int_or(i704, i707) + i709 = int_lshift(i705, i371) + i710 = uint_rshift(i709, i384) + i711 = int_lshift(i710, 16) + i712 = int_or(i708, i711) + i713 = int_lshift(i709, i371) + i714 = uint_rshift(i713, i384) + i715 = int_lshift(i714, 24) + i716 = int_or(i712, i715) + i717 = int_lshift(i713, i371) + setarrayitem_raw(18153472, i702, i716, descr=) + i718 = int_add(i702, 1) + i719 = int_ge(i718, i289) + guard_false(i719, descr=) + i720 = uint_rshift(i717, i384) + i721 = int_lshift(i717, i371) + i722 = uint_rshift(i721, i384) + i723 = int_lshift(i722, 8) + i724 = int_or(i720, i723) + i725 = int_lshift(i721, i371) + i726 = uint_rshift(i725, i384) + i727 = int_lshift(i726, 16) + i728 = int_or(i724, i727) + i729 = int_lshift(i725, i371) + i730 = uint_rshift(i729, i384) + i731 = int_lshift(i730, 24) + i732 = int_or(i728, i731) + i733 = int_lshift(i729, i371) + setarrayitem_raw(18153472, i718, i732, descr=) + i734 = int_add(i718, 1) + i735 = int_ge(i734, i289) + guard_false(i735, descr=) + i736 = uint_rshift(i733, i384) + i737 = 
int_lshift(i733, i371) + i738 = uint_rshift(i737, i384) + i739 = int_lshift(i738, 8) + i740 = int_or(i736, i739) + i741 = int_lshift(i737, i371) + i742 = uint_rshift(i741, i384) + i743 = int_lshift(i742, 16) + i744 = int_or(i740, i743) + i745 = int_lshift(i741, i371) + i746 = uint_rshift(i745, i384) + i747 = int_lshift(i746, 24) + i748 = int_or(i744, i747) + i749 = int_lshift(i745, i371) + setarrayitem_raw(18153472, i734, i748, descr=) + i750 = int_add(i734, 1) + i751 = int_ge(i750, i289) + guard_false(i751, descr=) + i752 = uint_rshift(i749, i384) + i753 = int_lshift(i749, i371) + i754 = uint_rshift(i753, i384) + i755 = int_lshift(i754, 8) + i756 = int_or(i752, i755) + i757 = int_lshift(i753, i371) + i758 = uint_rshift(i757, i384) + i759 = int_lshift(i758, 16) + i760 = int_or(i756, i759) + i761 = int_lshift(i757, i371) + i762 = uint_rshift(i761, i384) + i763 = int_lshift(i762, 24) + i764 = int_or(i760, i763) + i765 = int_lshift(i761, i371) + setarrayitem_raw(18153472, i750, i764, descr=) + i766 = int_add(i750, 1) + i767 = int_add_ovf(i598, i569) + guard_no_overflow(descr=) + i768 = int_add_ovf(i600, i569) + guard_no_overflow(descr=) + i769 = int_sub(i602, 10) + setfield_gc(ConstPtr(ptr176), i769, descr=) + i770 = int_le(i769, 0) + guard_false(i770, descr=) + p771 = new_with_vtable(23559752) + setfield_gc(p771, i767, descr=) + setarrayitem_gc(p146, 34, p771, descr=) + p772 = new_with_vtable(23559752) + setfield_gc(p772, i768, descr=) + setarrayitem_gc(p146, 35, p772, descr=) + p773 = new_with_vtable(23559752) + setfield_gc(p773, i616, descr=) + setarrayitem_gc(p146, 20, p773, descr=) + i774 = arraylen_gc(p146, descr=) + i775 = arraylen_gc(p579, descr=) + jump(p0, i1, p3, p8, i613, p594, i618, p18, i601, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, 
p148, p771, i161, p157, p772, i170, p166, p146, i769, i186, p183, p189, p773, i223, p199, p246, i255, p253, p262, p141, p280, i289, i291, i326, i341, i384, i371, i569, p567, p594, p579, descr=TargetToken(57116320)) """) @py.test.mark.skipif("'just dozens of long traces'") diff --git a/spyvm/test/jittest/test_strategies.py b/spyvm/test/jittest/test_strategies.py --- a/spyvm/test/jittest/test_strategies.py +++ b/spyvm/test/jittest/test_strategies.py @@ -12,80 +12,80 @@ (1 to: 10000) asOrderedCollection. """) self.assert_matches(traces[0].loop, """ - i197 = getarrayitem_gc(p54, 1, descr=), - i198 = int_eq(i197, 2147483647), - guard_false(i198, descr=), - i199 = int_ge(i197, i190), - guard_true(i199, descr=), - cond_call(i76, 21753520, p68, descr=), - cond_call(i106, 21753520, p92, descr=), - cond_call(i106, 21753520, p92, descr=), - p200 = getarrayitem_gc(p108, 0, descr=), - cond_call(i106, 21753520, p92, descr=), - p202 = new_with_vtable(23083336), - setfield_gc(p202, i190, descr=), - setarrayitem_gc(p108, 1, p202, descr=), - setarrayitem_gc(p80, 0, p200, descr=), - setfield_gc(p68, 2, descr=), - setfield_gc(p68, 15, descr=), - setfield_gc(p68, p0, descr=), - setfield_gc(ConstPtr(ptr82), i89, descr=), - setarrayitem_gc(p80, 1, p202, descr=), - guard_class(p200, 23083152, descr=), - p203 = getfield_gc(p200, descr=), - p204 = getfield_gc(p203, descr=), - guard_value(p204, ConstPtr(ptr121), descr=), - guard_not_invalidated(descr=), - p205 = getfield_gc(p200, descr=), - setarrayitem_gc(p80, 0, ConstPtr(null), descr=), - setfield_gc(p68, 0, descr=), - setfield_gc(ConstPtr(ptr82), i136, descr=), - setarrayitem_gc(p80, 1, ConstPtr(null), descr=), - guard_class(p205, ConstClass(ListStorageShadow), descr=), - p208 = getfield_gc_pure(p205, descr=), - p209 = getarrayitem_gc(p208, 2, descr=), - p210 = getarrayitem_gc(p208, 0, descr=), - guard_class(p210, 23083152, descr=), - p211 = getfield_gc(p210, descr=), - p212 = getfield_gc(p211, descr=), - guard_value(p212, 
ConstPtr(ptr154), descr=), - p213 = getfield_gc(p210, descr=), - guard_nonnull_class(p213, 23088412, descr=), - p214 = getfield_gc_pure(p213, descr=), - i215 = arraylen_gc(p214, descr=), - i216 = getfield_gc_pure(p213, descr=), - guard_nonnull_class(p209, 23083336, descr=), - i217 = getfield_gc_pure(p209, descr=), - i218 = int_eq(i217, i215), - guard_false(i218, descr=), - i219 = int_add_ovf(i217, 1), - guard_no_overflow(descr=), - i220 = int_ge(i217, 0), - guard_true(i220, descr=), - i221 = int_lt(i217, i215), - guard_true(i221, descr=), - i222 = int_eq(i190, 2147483647), - guard_false(i222, descr=), - setarrayitem_gc(p214, i217, i190, descr=), - i223 = getarrayitem_gc(p54, 2, descr=), - setfield_gc(p68, -1, descr=), - setfield_gc(p68, ConstPtr(null), descr=), - setfield_gc(ConstPtr(ptr82), i85, descr=), - i224 = int_eq(i223, 2147483647), - guard_false(i224, descr=), - i225 = int_add_ovf(i190, i223), - guard_no_overflow(descr=), - i226 = int_sub(i193, 5), - setfield_gc(ConstPtr(ptr82), i226, descr=), - i227 = int_le(i226, 0), - guard_false(i227, descr=), - p228 = new_with_vtable(23083336), - setfield_gc(p228, i219, descr=), - setarrayitem_gc(p208, 2, p228, descr=), - i229 = arraylen_gc(p54, descr=), - i230 = arraylen_gc(p80, descr=), - i231 = arraylen_gc(p108, descr=), - jump(p0, p3, p6, i225, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p54, i76, p68, i106, p92, p108, p80, i89, i91, i136, i85, i226, descr=TargetToken(48645456)) + i191 = getarrayitem_gc(p53, 1, descr=) + i192 = int_eq(i191, 2147483647) + guard_false(i192, descr=) + i193 = int_ge(i191, i184) + guard_true(i193, descr=) + cond_call(i75, 22145408, p67, descr=) + cond_call(i101, 22145408, p90, descr=) + cond_call(i101, 22145408, p90, descr=) + p194 = getarrayitem_gc(p103, 0, descr=) + cond_call(i101, 22145408, p90, descr=) + p196 = new_with_vtable(23559752) + setfield_gc(p196, i184, descr=) + setarrayitem_gc(p103, 1, p196, descr=) + setarrayitem_gc(p79, 0, p194, descr=) + 
setfield_gc(p67, 2, descr=) + setfield_gc(p67, 11, descr=) + setfield_gc(p67, p0, descr=) + setfield_gc(ConstPtr(ptr81), i87, descr=) + setarrayitem_gc(p79, 1, p196, descr=) + guard_class(p194, 23559528, descr=) + p198 = getfield_gc(p194, descr=) + p199 = getfield_gc(p198, descr=) + guard_value(p199, ConstPtr(ptr115), descr=) + guard_not_invalidated(descr=) + p200 = getfield_gc(p194, descr=) + setarrayitem_gc(p79, 0, ConstPtr(null), descr=) + setfield_gc(p67, 0, descr=) + setfield_gc(ConstPtr(ptr81), i130, descr=) + setarrayitem_gc(p79, 1, ConstPtr(null), descr=) + guard_class(p200, ConstClass(ListStorageShadow), descr=) + p203 = getfield_gc_pure(p200, descr=) + p204 = getarrayitem_gc(p203, 2, descr=) + p205 = getarrayitem_gc(p203, 0, descr=) + guard_class(p205, 23559528, descr=) + p206 = getfield_gc(p205, descr=) + p207 = getfield_gc(p206, descr=) + guard_value(p207, ConstPtr(ptr148), descr=) + p208 = getfield_gc(p205, descr=) + guard_nonnull_class(p208, 23564708, descr=) + p209 = getfield_gc_pure(p208, descr=) + i210 = arraylen_gc(p209, descr=) + i211 = getfield_gc_pure(p208, descr=) + guard_nonnull_class(p204, 23559752, descr=) + i212 = getfield_gc_pure(p204, descr=) + i213 = int_eq(i212, i210) + guard_false(i213, descr=) + i214 = int_add_ovf(i212, 1) + guard_no_overflow(descr=) + i215 = int_ge(i212, 0) + guard_true(i215, descr=) + i216 = int_lt(i212, i210) + guard_true(i216, descr=) + i217 = int_eq(i184, 2147483647) + guard_false(i217, descr=) + setarrayitem_gc(p209, i212, i184, descr=) + i218 = getarrayitem_gc(p53, 2, descr=) + setfield_gc(p67, -1, descr=) + setfield_gc(p67, ConstPtr(null), descr=) + setfield_gc(ConstPtr(ptr81), i82, descr=) + i219 = int_eq(i218, 2147483647) + guard_false(i219, descr=) + i220 = int_add_ovf(i184, i218) + guard_no_overflow(descr=) + i221 = int_sub(i187, 4) + setfield_gc(ConstPtr(ptr81), i221, descr=) + i222 = int_le(i221, 0) + guard_false(i222, descr=) + p223 = new_with_vtable(23559752) + setfield_gc(p223, i214, descr=) + 
setarrayitem_gc(p203, 2, p223, descr=) + i224 = arraylen_gc(p53, descr=) + i225 = arraylen_gc(p79, descr=) + i226 = arraylen_gc(p103, descr=) +jump(p0, i1, p3, p6, i220, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p53, i75, p67, i101, p90, p103, p79, i87, i89, i130, i82, i221, descr=TargetToken(58164944)) """) def test_indexOf(self, spy, tmpdir): @@ -95,32 +95,32 @@ """) # First loop: asOrderedCollection, second loop: makeRoomAtLast self.assert_matches(traces[2].loop, """ - i144 = int_le(i137, i63), - guard_true(i144, descr=), - guard_not_invalidated(descr=), - setfield_gc(ConstPtr(ptr85), i92, descr=), - i145 = int_add_ovf(i137, i101), - guard_no_overflow(descr=), - i146 = int_sub(i145, 1), - i147 = int_gt(i146, i109), - guard_false(i147, descr=), - i148 = int_sub(i146, 1), - i149 = int_ge(i148, 0), - guard_true(i149, descr=), - i150 = int_lt(i148, i127), - guard_true(i150, descr=), - i151 = getarrayitem_gc(p126, i148, descr=), - i152 = int_eq(i151, 2147483647), - guard_false(i152, descr=), - setfield_gc(ConstPtr(ptr85), i88, descr=), - i153 = int_eq(i151, i134), - guard_false(i153, descr=), - i154 = int_add_ovf(i137, 1), - guard_no_overflow(descr=), - i155 = int_sub(i140, 3), - setfield_gc(ConstPtr(ptr85), i155, descr=), - i156 = int_le(i155, 0), - guard_false(i156, descr=), - i157 = arraylen_gc(p97, descr=), - jump(p0, p3, p6, p8, p10, i154, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i63, p65, i92, i101, p99, i109, p106, p112, i127, p126, i88, i134, i155, p97, descr=TargetToken(53728496)) + i142 = int_le(i135, i62) + guard_true(i142, descr=) + guard_not_invalidated(descr=) + setfield_gc(ConstPtr(ptr84), i90, descr=) + i143 = int_add_ovf(i135, i99) + guard_no_overflow(descr=) + i144 = int_sub(i143, 1) + i145 = int_gt(i144, i107) + guard_false(i145, descr=) + i146 = int_sub(i144, 1) + i147 = int_ge(i146, 0) + guard_true(i147, descr=) + i148 = int_lt(i146, i125) + guard_true(i148, descr=) 
+ i149 = getarrayitem_gc(p124, i146, descr=) + i150 = int_eq(i149, 2147483647) + guard_false(i150, descr=) + setfield_gc(ConstPtr(ptr84), i86, descr=) + i151 = int_eq(i149, i132) + guard_false(i151, descr=) + i152 = int_add_ovf(i135, 1) + guard_no_overflow(descr=) + i153 = int_sub(i138, 3) + setfield_gc(ConstPtr(ptr84), i153, descr=) + i154 = int_le(i153, 0) + guard_false(i154, descr=) + i155 = arraylen_gc(p95, descr=) + jump(p0, i1, p3, p6, p8, p10, i152, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, p48, p50, p52, i62, p64, i90, i99, p97, i107, p104, p110, i125, p124, i86, i132, i153, p95, descr=TargetToken(47907984)) """) From noreply at buildbot.pypy.org Thu May 15 12:40:33 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 15 May 2014 12:40:33 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged new interpreter flags. Message-ID: <20140515104033.EDB791C0190@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r829:2649a886e4a0 Date: 2014-05-14 15:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2649a886e4a0/ Log: Merged new interpreter flags. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -25,7 +25,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", - "startup_time", "evented"] + "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], @@ -34,9 +34,9 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", trace=False, - evented=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def __init__(self, space, image=None, image_name="", + trace=False, evented=True, interrupts=True, + max_stack_depth=constants.MAX_LOOP_DEPTH): import time # === Initialize immutable variables @@ -49,6 +49,7 @@ self.startup_time = constants.CompileTime self.max_stack_depth = max_stack_depth self.evented = evented + self.interrupts = interrupts try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: @@ -56,7 +57,7 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - self.remaining_stack_depth = max_stack_depth + self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -65,13 +66,15 @@ # This is the top-level loop and is not invoked recursively. 
s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.remaining_stack_depth == self.max_stack_depth + assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: + if self.trace: + print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: s_new_context = s_sender @@ -112,16 +115,17 @@ else: s_context.push(nlr.value) - # This is just a wrapper around loop_bytecodes that handles the remaining_stack_depth mechanism + # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism def stack_frame(self, s_new_frame, may_context_switch=True): - if self.remaining_stack_depth <= 1: - raise StackOverflow(s_new_frame) - - self.remaining_stack_depth -= 1 + if self.max_stack_depth > 0: + if self.current_stack_depth >= self.max_stack_depth: + raise StackOverflow(s_new_frame) + + self.current_stack_depth += 1 try: self.loop_bytecodes(s_new_frame, may_context_switch) finally: - self.remaining_stack_depth += 1 + self.current_stack_depth -= 1 def step(self, context): bytecode = context.fetch_next_bytecode() @@ -139,6 +143,8 @@ # ============== Methods for handling user interrupts ============== def jitted_check_for_interrupt(self, s_frame): + if not self.interrupts: + return # Normally, the tick counter is decremented by 1 for every message send. # Since we don't know how many messages are called during this trace, we # just decrement by 100th of the trace length (num of bytecodes). 
@@ -148,6 +154,8 @@ self.quick_check_for_interrupt(s_frame, decr_by) def quick_check_for_interrupt(self, s_frame, dec=1): + if not self.interrupts: + return self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: self.interrupt_check_counter = self.interrupt_counter_size @@ -210,7 +218,7 @@ return self.interpret_toplevel(s_frame.w_self()) def padding(self, symbol=' '): - return symbol * (self.max_stack_depth - self.remaining_stack_depth) + return symbol * self.current_stack_depth class ReturnFromTopLevel(Exception): _attrs_ = ["object"] diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1014,7 +1014,7 @@ class StackTestInterpreter(TestInterpreter): def stack_frame(self, w_frame, may_interrupt=True): - stack_depth = self.max_stack_depth - self.remaining_stack_depth + stack_depth = self.current_stack_depth for i in range(stack_depth + 1): assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow, storage_statistics + error, shadow, storage_statistics, constants from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine @@ -128,13 +128,14 @@ -r|--run [code string] -b|--benchmark [code string] -p|--poll_events + -ni|--no-interrupts + -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] --strategy-log --strategy-stats - --strategy-stats-dot + --strategy-stats-dot --strategy-stats-details [image path, default: Squeak.image] - """ % argv[0] - + """ % (argv[0], constants.MAX_LOOP_DEPTH) def _arg_missing(argv, idx, arg): if len(argv) 
== idx + 1: @@ -152,6 +153,8 @@ stringarg = "" code = None as_benchmark = False + max_stack_depth = constants.MAX_LOOP_DEPTH + interrupts = True while idx < len(argv): arg = argv[idx] @@ -189,6 +192,12 @@ code = argv[idx + 1] as_benchmark = True idx += 1 + elif arg in ["-ni", "--no-interrupts"]: + interrupts = False + elif arg in ["-d", "--max-stack-depth"]: + _arg_missing(argv, idx, arg) + max_stack_depth = int(argv[idx + 1]) + idx += 1 elif arg == "--strategy-log": storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": @@ -221,7 +230,9 @@ space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented) + interp = interpreter.Interpreter(space, image, image_name=path, + trace=trace, evented=evented, + interrupts=interrupts, max_stack_depth=max_stack_depth) space.runtime_setup(argv[0]) result = 0 if benchmark is not None: From noreply at buildbot.pypy.org Thu May 15 12:40:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 15 May 2014 12:40:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Merged storage branch. Message-ID: <20140515104035.0A4F11C0190@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r830:a8514605c0c4 Date: 2014-05-15 12:40 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a8514605c0c4/ Log: Merged storage branch. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -25,7 +25,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", - "startup_time", "evented"] + "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], @@ -34,9 +34,9 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", trace=False, - evented=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def __init__(self, space, image=None, image_name="", + trace=False, evented=True, interrupts=True, + max_stack_depth=constants.MAX_LOOP_DEPTH): import time # === Initialize immutable variables @@ -49,6 +49,7 @@ self.startup_time = constants.CompileTime self.max_stack_depth = max_stack_depth self.evented = evented + self.interrupts = interrupts try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: @@ -56,7 +57,7 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - self.remaining_stack_depth = max_stack_depth + self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -65,13 +66,15 @@ # This is the top-level loop and is not invoked recursively. 
s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.remaining_stack_depth == self.max_stack_depth + assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: + if self.trace: + print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: s_new_context = s_sender @@ -106,16 +109,26 @@ self.jitted_check_for_interrupt(s_context) self.jit_driver.can_enter_jit(pc=pc, self=self, method=method, s_context=s_context) - # This is just a wrapper around loop_bytecodes that handles the remaining_stack_depth mechanism + try: + self.step(s_context) + except Return, nlr: + if nlr.s_target_context is not s_context: + s_context._activate_unwind_context(self) + raise nlr + else: + s_context.push(nlr.value) + + # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism def stack_frame(self, s_new_frame, may_context_switch=True, fresh_context=False): - if self.remaining_stack_depth <= 1: - raise StackOverflow(s_new_frame) - - self.remaining_stack_depth -= 1 + if self.max_stack_depth > 0: + if self.current_stack_depth >= self.max_stack_depth: + raise StackOverflow(s_new_frame) + + self.current_stack_depth += 1 try: self.loop_bytecodes(s_new_frame, may_context_switch=may_context_switch, fresh_context=fresh_context) finally: - self.remaining_stack_depth += 1 + self.current_stack_depth -= 1 def step(self, context, pc): bytecode = context.fetch_bytecode(pc) @@ -134,6 +147,8 @@ # ============== Methods for handling user interrupts ============== def jitted_check_for_interrupt(self, s_frame): + if not self.interrupts: + return # Normally, the tick counter is decremented by 1 for every message send. 
# Since we don't know how many messages are called during this trace, we # just decrement by 100th of the trace length (num of bytecodes). @@ -143,6 +158,8 @@ self.quick_check_for_interrupt(s_frame, decr_by) def quick_check_for_interrupt(self, s_frame, dec=1): + if not self.interrupts: + return self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: self.interrupt_check_counter = self.interrupt_counter_size @@ -205,7 +222,7 @@ return self.interpret_toplevel(s_frame.w_self()) def padding(self, symbol=' '): - return symbol * (self.max_stack_depth - self.remaining_stack_depth) + return symbol * self.current_stack_depth class ReturnFromTopLevel(Exception): _attrs_ = ["object"] diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1014,7 +1014,7 @@ class StackTestInterpreter(TestInterpreter): def stack_frame(self, w_frame, may_interrupt=True, fresh_context=False): - stack_depth = self.max_stack_depth - self.remaining_stack_depth + stack_depth = self.current_stack_depth for i in range(stack_depth + 1): assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow, storage_statistics + error, shadow, storage_statistics, constants from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine @@ -128,13 +128,14 @@ -r|--run [code string] -b|--benchmark [code string] -p|--poll_events + -ni|--no-interrupts + -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] --strategy-log --strategy-stats - --strategy-stats-dot + --strategy-stats-dot 
--strategy-stats-details [image path, default: Squeak.image] - """ % argv[0] - + """ % (argv[0], constants.MAX_LOOP_DEPTH) def _arg_missing(argv, idx, arg): if len(argv) == idx + 1: @@ -152,6 +153,8 @@ stringarg = "" code = None as_benchmark = False + max_stack_depth = constants.MAX_LOOP_DEPTH + interrupts = True while idx < len(argv): arg = argv[idx] @@ -189,6 +192,12 @@ code = argv[idx + 1] as_benchmark = True idx += 1 + elif arg in ["-ni", "--no-interrupts"]: + interrupts = False + elif arg in ["-d", "--max-stack-depth"]: + _arg_missing(argv, idx, arg) + max_stack_depth = int(argv[idx + 1]) + idx += 1 elif arg == "--strategy-log": storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": @@ -221,7 +230,9 @@ space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented) + interp = interpreter.Interpreter(space, image, image_name=path, + trace=trace, evented=evented, + interrupts=interrupts, max_stack_depth=max_stack_depth) space.runtime_setup(argv[0]) result = 0 if benchmark is not None: From noreply at buildbot.pypy.org Thu May 15 13:24:27 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 15 May 2014 13:24:27 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: Oups, removed artifact from earlier merge. Message-ID: <20140515112427.9D0421C02D8@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-refactoring-virtual-pc Changeset: r831:76403ee39c2e Date: 2014-05-15 12:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/76403ee39c2e/ Log: Oups, removed artifact from earlier merge. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -109,15 +109,6 @@ self.jitted_check_for_interrupt(s_context) self.jit_driver.can_enter_jit(pc=pc, self=self, method=method, s_context=s_context) - try: - self.step(s_context) - except Return, nlr: - if nlr.s_target_context is not s_context: - s_context._activate_unwind_context(self) - raise nlr - else: - s_context.push(nlr.value) - # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism def stack_frame(self, s_new_frame, may_context_switch=True, fresh_context=False): if self.max_stack_depth > 0: From noreply at buildbot.pypy.org Thu May 15 15:33:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 May 2014 15:33:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: move report's content to the paper Message-ID: <20140515133334.05AC61C0320@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5250:666870802d18 Date: 2014-05-15 15:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/666870802d18/ Log: move report's content to the paper diff --git a/talk/dls2014/paper/mmap pages.pdf b/talk/dls2014/paper/mmap pages.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4b877ac47e2c13c60867014b61c51ede4e761ee4 GIT binary patch [cut] diff --git a/talk/dls2014/paper/page remapping.pdf b/talk/dls2014/paper/page remapping.pdf new file mode 100644 index 0000000000000000000000000000000000000000..071a52617241a8d3067de4338aaa9c1d211c3353 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -7,13 +7,68 @@ % 10pt To set in 10-point type instead of 9-point. % 11pt To set in 11-point type instead of 9-point. % authoryear To obtain author/year citation style instead of numeric. 
+\synctex=-1 \usepackage[utf8]{inputenc} \usepackage{array} \usepackage{color} -\usepackage{hyperref} \usepackage{amsmath} \usepackage{amssymb} +\usepackage{fixltx2e} +\usepackage{graphicx} +\usepackage[unicode=true,pdfusetitle, + bookmarks=true,bookmarksnumbered=false,bookmarksopen=false, + breaklinks=false,pdfborder={0 0 1},backref=false,colorlinks=false]{hyperref} + + +\makeatletter +\usepackage{enumitem} +\usepackage{multicol} + +% nice listings +\usepackage{xcolor} +\usepackage{newverbs} + +\usepackage{color} +\definecolor{verylightgray}{rgb}{0.93,0.93,0.93} +\definecolor{darkblue}{rgb}{0.2,0.2,0.6} +\definecolor{commentgreen}{rgb}{0.25,0.5,0.37} +\usepackage{letltxmacro} + +\usepackage{listings} + +\makeatletter +\LetLtxMacro{\oldlstinline}{\lstinline} + +\renewcommand\lstinline[1][]{% + \Collectverb{\@@myverb}% +} + +\def\@@myverb#1{% + \begingroup + \fboxsep=0.2em + \colorbox{verylightgray}{\oldlstinline|#1|}% + \endgroup +} +\makeatother + + + + +\lstset{backgroundcolor={\color{verylightgray}}, + basicstyle={\scriptsize\ttfamily}, + commentstyle={\ttfamily\color{commentgreen}}, + keywordstyle={\bfseries\color{darkblue}}, + morecomment={[l]{//}}, + tabsize=4, + morekeywords={foreach,in,def,type,dynamic,Int, + Boolean,infer,void,super,if,boolean,int,else, + while,do,extends,class,assert,for,switch,case, + private,protected,public,const,final,static, + interface,new,true,false,null,return}} +\renewcommand{\lstlistingname}{Listing} + + \newcommand{\mynote}[2]{% \textcolor{red}{% @@ -22,7 +77,10 @@ }% } -\newcommand\cfbolz[1]{\mynote{Carl Friedrich}{#1}} +\newcommand\remi[1]{\mynote{Remi}{#1}} +\newcommand\cfbolz[1]{\mynote{cfbolz}{#1}} + + \begin{document} @@ -48,8 +106,8 @@ %% \titlebanner{banner above paper title} % These are ignored unless %% \preprintfooter{short description of paper} % 'preprint' option specified. 
-\title{A Way Forward in Parallelising Dynamic Languages} -\subtitle{Position Paper, ICOOOLPS'14} +\title{Virtual Memory Assisted Transactional Memory for Dynamic Languages} +\subtitle{DLS'14} \authorinfo{Remigius Meier} {Department of Computer Science\\ ETH Zürich} @@ -75,10 +133,809 @@ ... \section{Introduction} -... -\section{Conclusion} +Dynamic languages like Python, PHP, Ruby, and JavaScript are usually +regarded as very expressive but also very slow. In recent years, the +introduction of just-in-time compilers (JIT) for these languages (e.g. +PyPy, V8, Tracemonkey) started to change this perception by delivering +good performance that enables new applications. However, a parallel +programming model was not part of the design of those languages. Thus, +the reference implementations of e.g. Python and Ruby use a single, +global interpreter lock (GIL) to serialize the execution of code in +threads. + +While this GIL prevents any parallelism from occurring, it also +provides some useful guarantees. Since this lock is always acquired +while executing bytecode instructions and it may only be released +in-between such instructions, it provides perfect isolation and +atomicity between multiple threads for a series of +instructions. Another technology that can provide the same guarantees +is transactional memory (TM). + +There have been several attempts at replacing the GIL with TM. Using +transactions to enclose multiple bytecode instructions, we can get the +very same semantics as the GIL while possibly executing several +transactions in parallel. Furthermore, by exposing these +interpreter-level transactions to the application in the form of +\emph{atomic blocks}, we give dynamic languages a new synchronization +mechanism that avoids several of the problems of locks as they are +used now. 
+ +\cfbolz{the above is good, here is something missing: problems with current STM approaches, outlining the intuition behind the new one} + +Our contributions include: +\begin{itemize}[noitemsep] +\item We introduce a new software transactional memory (STM) system + that performs well even on low numbers of CPUs. It uses a novel + combination of hardware features and garbage collector (GC) + integration in order to keep the overhead of STM very low. +\item This new STM system is used to replace the GIL in Python and is + then evaluated extensively. +\item We introduce atomic blocks to the Python language to provide a + backwards compatible, composable synchronization mechanism for + threads. +\end{itemize} + + + +\section{Background} + + +\subsection{Transactional Memory} + +Transactional memory (TM) is a concurrency control mechanism that +comes from database systems. Using transactions, we can group a series +of instructions performing operations on memory and make them happen +atomically and in complete isolations from other +transactions. \emph{Atomicity} means that all these instructions in +the transaction and their effects seem to happen at one, undividable +point in time. Other transactions never see inconsistent state of a +partially executed transaction which is called \emph{isolation}. + +If we start multiple such transactions in multiple threads, the TM +system guarantees that the outcome of running the transactions is +\emph{serializable}. Meaning, the outcome is equal to some sequential +execution of these transactions. This means that the approach provides the same +semantics as using the GIL +while still allowing the TM system to +run transactions in parallel as an optimization. + + +\subsection{Python} + +\cfbolz{a pypy introduction needs to go somewhere, a paragraph or so. maybe in the evaluation section} + +We implement and evaluate our system for the Python language. 
For the +actual implementation, we chose the PyPy interpreter because replacing +the GIL there with a TM system is just a matter of adding a new +transformation to the translation process of the interpreter. + +Over the years, Python added multiple ways to provide concurrency and +parallelism to its applications. We want to highlight two of them, +namely \emph{threading} and \emph{multiprocessing}. + +\emph{Threading} employs operating system (OS) threads to provide +concurrency. It is, however, limited by the GIL and thus does not +provide parallelism. At this point we should mention that it is indeed +possible to run external functions written in C instead of Python in +parallel. Our work focuses on Python itself and ignores this aspect as +it requires writing in a different language. + +The second approach, \emph{multiprocessing}, uses multiple instances +of the interpreter itself and runs them in separate OS processes. +Here we actually get parallelism because we have one GIL per +interpreter, but of course we have the overhead of multiple processes +/ interpreters and also need to exchange data between them explicitly +and expensively. + +We focus on the \emph{threading} approach. This requires us to remove +the GIL from our interpreter in order to run code in parallel on +multiple threads. One approach to this is fine-grained locking instead +of a single global lock. Jython and IronPython are implementations of +this. It requires great care in order to avoid deadlocks, which is why +we follow the TM approach that provides a \emph{direct} replacement +for the GIL. It does not require careful placing of locks in the right +spots. We will compare our work with Jython for evaluation. + + +\subsection{Synchronization} + +\cfbolz{citation again needed for the whole subsection} + +It is well known that using locks to synchronize multiple threads is +hard. They are non-composable, have overhead, may deadlock, limit +scalability, and overall add a lot of complexity. 
For a better +parallel programming model for dynamic languages, we want to implement +another, well-known synchronization mechanism: \emph{atomic blocks}. + +Atomic blocks are composable, deadlock-free, higher-level and expose +useful atomicity and isolation guarantees to the application for a +series of instructions. An implementation using a GIL would simply +guarantee that the GIL is not released during the execution of the +atomic block. Using TM, we have the same effect by guaranteeing that +all instructions in an atomic block are executed inside a single +transaction. + + +\remi{STM, how atomicity \& isolation; reasons for overhead} + + +\section{Method} + +\subsection{Transactional Memory Model} + +In this section, we describe the general model of our TM system. This +should clarify the general semantics in commonly used terms from +the literature. + +\cfbolz{there is an overview paragraph of the idea missing, maybe in the introduction} + +\cfbolz{this all feels very much dumping details, needs more overview. why is this info important? the subsubsections don't have any connections} + +\subsubsection{Conflict Handling} + + +Our conflict detection works with \emph{object + granularity}. Conceptually, it is based on \emph{read} and +\emph{write sets} of transactions. Two transactions conflict if they +have accessed a common object that is now in the write set of at least +one of them. + +The \emph{concurrency control} works partly \emph{optimistically} for +reading of objects, where conflicts caused by just reading an object +in transactions are detected only when the transaction that writes the +object actually commits. For write-write conflicts we are currently +\emph{pessimistic}: Only one transaction may have a certain object in +its write set at any point in time, others trying to write to it will +have to wait or abort. 
+ +We use \emph{lazy version management} to ensure that modifications by +a transaction are not visible to another transaction before the former +commits. + + + + +\subsubsection{Semantics} + +As required for TM systems, we guarantee complete \emph{isolation} +and \emph{atomicity} for transactions at all times. Furthermore, +the isolation provides full \emph{opacity} to always guarantee a consistent +read set. + +To support irreversible operations that cannot be undone when we abort +a transaction (e.g. I/O, syscalls, and non-transactional code in +general), we employ \emph{irrevocable} or \emph{inevitable +transactions}. These transactions are always guaranteed to +commit. There is always at most one such transaction running in the +system, thus their execution is serialised. With this guarantee, +providing \emph{strong isolation} and \emph{serializability} between +non-transactional code is possible by making the current transaction +inevitable right before running irreversible operations. + + +\subsubsection{Contention Management} + +When a conflict is detected, we perform some simple contention +management. First, inevitable transactions always win. Second, the +older transaction wins. Different schemes are possible. + + +\subsubsection{Software Transactional Memory} + +Generally speaking, the system is fully implemented in +software. However, we exploit some more advanced features of current +CPUs, especially \emph{memory segmentation, virtual memory,} and the +64-bit address space. + + +\subsection{Implementation} + +In this section, we will present the general idea of how the TM model +is implemented. Especially the aspects of providing isolation and +atomicity, as well as conflict detection are explained. We try to do +this without going into too much detail about the implementation. The +later section \ref{sub:Low-level-Implementation} will discuss it in +more depth. 
+ + +\subsubsection{Memory Segmentation} + +A naive approach to providing complete isolation between threads is to +partition the virtual memory of a process into $N$ segments, one per +thread. Each segment then holds a copy of all the memory available to +the program. Thus, each thread automatically has a private copy of +every object that it can modify in complete isolation from other +threads. + +In order to reference these objects, we need references that are valid +in all threads and automatically point to the private copies. Since +an object's offset inside a segment is the same in all segments, we +can use this offset to reference objects. Because all segments are +copies of each other, this \emph{Segment Offset (SO)} points to the +private version of an object in all threads\,/\,segments. To then +translate this SO to a real virtual memory address when used inside a +thread, we need to add the thread's segment start address to the +SO. The result of this operation is called a \emph{Linear Address + (LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. + +x86-CPUs provide a feature called \emph{memory segmentation}. It +performs this translation from a SO to a LA directly in hardware. We +can use the segment register $\%gs$, which is mostly unused in current +applications. When this register points to a thread's segment start +address, we can instruct the CPU to perform the above translation from +a reference of the form $\%gs{::}SO$ to the right LA on its own. This +process is efficient enough that we can do it on every access to an +object. + +In summary, we can use a single SO to reference the same object in all +threads, and it will be translated by the CPU to a LA that always +points to the thread's private version of this object. Thereby, +threads are fully isolated from each other. However, $N$ segments +require $N$-times the memory and modifications on an object need to be +propagated to all segments. 
+ +\begin{figure*}[t] + \begin{centering} + \includegraphics[scale=0.8]{\string"segment addressing\string".pdf} + \par\end{centering} + + \protect\caption{Segment Addressing\label{fig:Segment-Addressing}} +\end{figure*} + + + +\subsubsection{Page Sharing} + +In order to eliminate the prohibitive memory requirements of keeping +around $N$ segment copies, we share memory between them. The segments +are initially allocated in a single range of virtual memory by a call +to \lstinline!mmap()!. As illustrated in Figure +\ref{fig:mmap()-Page-Mapping}, \lstinline!mmap()! creates a mapping +between a range of virtual memory pages and virtual file pages. The +virtual file pages are then mapped lazily by the kernel to real +physical memory pages. The mapping generated by \lstinline!mmap()! is +initially linear but can be changed arbitrarily. Especially, we can +remap so that multiple virtual memory pages map to a single virtual +file page. This is what we use to share memory between the segments +since then we also only require one page of physical memory. + +\begin{figure}[h] + \begin{centering} + \includegraphics[scale=0.8]{\string"mmap pages\string".pdf} + \par\end{centering} + + \protect\caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} +\end{figure} + + +As illustrated in Figure \ref{fig:Page-Remapping}, in our initial +configuration (I) all segments are backed by their own range of +virtual file pages. This is the share-nothing configuration. + +We then designate segment 0 to be the \emph{Sharing-Segment}. No +thread gets this segment assigned to it, it simply holds the pages +shared between all threads. So in (II), we remap all virtual pages of +the segments $>0$ to the file pages of our sharing-segment. This is +the fully-shared configuration. + +During runtime, we can then privatize single pages in segments $>0$ +again by remapping single pages as seen in (III). 
+ +Looking back at address translation for object references, we see now +that this is actually a two-step process. First, $\%gs{::}SO$ gets +translated to different linear addresses in different threads by the +CPU. Then, depending on the current mapping of virtual pages to file +pages, these LAs can map to a single file page in the sharing-segment, +or to privatized file pages in the corresponding segments. This +mapping is also performed efficiently by the CPU and can easily be +done on every access to an object. + +In summary, $\%gs{::}SO$ is translated efficiently by the CPU to +either a physical memory location which is shared between several +threads/segments, or to a location in memory private to the +segment/thread. This makes the memory segmentation model for +isolation memory efficient again. + +\begin{figure}[h] + \begin{centering} + \includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} + \par\end{centering} + + \protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to + segment 0, fully shared memory configuration. (III) privatize single + pages.\label{fig:Page-Remapping}} +\end{figure} + + + +\subsubsection{Isolation: Copy-On-Write} + +We now use these mechanisms to provide isolation for transactions. +Using write barriers, we implement a \emph{Copy-On-Write (COW)} on the +level of pages. Starting from the initial fully-shared configuration +(Figure \ref{fig:Page-Remapping}, (II)), when we need to modify an +object without other threads seeing the changes immediately, we ensure +that all pages belonging to the object are private to our segment. + +To detect when to privatize pages, we use write barriers before every +write. When the barrier detects that the object is not in a private +page (or any pages that belong to the object), we remap and copy the +pages to the thread's segment. From now on, the translation of +$\%gs{::}SO$ in this particular segment will resolve to the private +version of the object. 
Note, the SO used to reference the object does +not change during that process. + + + +\subsubsection{Isolation: Barriers} + +The job of barriers is to ensure complete isolation between transactions +and to register the objects in the read or write set. We insert read +and write barriers before reading or modifying an object except if +we statically know an object to be readable or writable already. +\begin{description} +\item [{Read~Barrier:}] Adds the object to the read set of the current + transaction. Since our two-step address translation automatically + resolves the reference to the private version of the object on every + access anyway, the read barrier does not need to do address translation anymore. +\item [{Write~Barrier:}] Adds the object to the read and write set of + the current transaction and checks if all pages of the object are + private, doing COW otherwise.\\ + Furthermore, we currently allow only one transaction modifying an + object at a time. To ensure this, we acquire a write lock on the object + and also eagerly check for a write-write conflict at this point. If + there is a conflict, we do some contention management to decide which + transaction has to wait or abort. Eagerly detecting this kind of conflict + is not inherent to our system, future experiments may show that we + want to lift this restriction. +\end{description} + + + +\subsubsection{Atomicity: Commit \& Abort} + +To provide atomicity for a transaction, we want to make changes globally +visible on commit. We also need to be able to completely abort a +transaction without a trace, like it never happened. +\begin{description} +\item [{Commit:}] If a transaction commits, we synchronize all threads + so that all of them are waiting in a safe point. In the committing + transaction, we go through all objects in the write set and check if + another transaction in a different segment read the same object. 
+ Conflicts are resolved again by either the committing or the other + transaction waiting or aborting.\\ + We then push all changes of modified objects in private pages to all + the pages in other segments, including the sharing-segment (segment + 0). +\item [{Abort:}] On abort the transaction will forget about all the + changes it has done. All objects in the write set are reset by + copying their previous version from the sharing-segment into the + private pages of the aborting transaction.\\ + Re-sharing these pages instead of resetting the changes would not + only be slower because it involves a system call, it is also very + likely the page would get privatised again immediately after re-trying + the transaction. Since this again involves a full page copy, + resetting should be faster than re-sharing. +\end{description} + +\cfbolz{random question: did we investigate the extra memory requirements? we should characterize memory overhead somewhere, eg at least one byte per object for the read markers} + +\subsubsection{Summary} + +We provide isolation between transactions by privatizing the pages of +the segments belonging to the threads the transactions run in. To +detect when and which pages need privatization, we use write barriers +that trigger a COW of one or several pages. Conflicts, however, are +detected on the level of objects; based on the concept of read and +write sets. Barriers before reading and writing add objects to the +corresponding set; particularly detecting write-write conflicts +eagerly. On commit, we resolve read-write conflicts and push +modifications to other segments. Aborting transactions simply undo +their changes by copying from the sharing-segment. + + +\subsection{Low-level Implementation\label{sub:Low-level-Implementation}} + +In this section, we will provide details about the actual +implementation of the system and discuss some of the issues that we +encountered. 
+ + +\subsubsection{Architecture} + +Our TM system is designed as a library that covers all aspects around +transactions and object management. The library consists of two parts: +(I) It provides a simple interface to starting and committing +transactions, as well as the required read and write barriers. (II) It +also includes a \emph{garbage collector (GC)} that is closely +integrated with the TM part (e.g. it shares the write barrier). The +close integration helps in order to know more about the lifetime of an +object, as will be explained in the following sections. + + +\subsubsection{Application Programming Interface\label{sub:Application-Programming-Interfac}} + +\begin{lstlisting} +void stm_start_transaction(tl, jmpbuf) +void stm_commit_transaction() +void stm_read(object_t *obj) +void stm_write(object_t *obj) +object_t *stm_allocate(ssize_t size_rounded) +STM_PUSH_ROOT(tl, obj) +STM_POP_ROOT(tl, obj) +\end{lstlisting} + + +\lstinline!stm_start_transaction()! starts a transaction. It requires +two arguments, the first being a thread-local data structure and the +second a buffer for use by \lstinline!setjmp()!. +\lstinline!stm_commit_transaction()! tries to commit the current +transaction. \lstinline!stm_read()!, \lstinline!stm_write()! perform +a read or a write barrier on an object and \lstinline!stm_allocate()! +allocates a new object with the specified size (must be a multiple of +16). \lstinline!STM_PUSH_ROOT()! and \lstinline!STM_POP_ROOT()! push +and pop objects on the shadow stack~\footnote{A stack for pointers to + GC objects that allows for precise garbage collection. All objects + on that stack are never seen as garbage and are thus always kept + alive.}. Objects have to be saved using this stack around calls +that may cause a GC cycle to happen, and also while there is no +transaction running. In this simplified API, only +\lstinline!stm_allocate()! and \lstinline!stm_commit_transaction()! +require saving object references. 
The type \lstinline!object_t! is special as it causes the
compiler~\footnote{Clang 3.5 with some patches to support this
  address-space 256 feature} to make all accesses through it relative to the $\%gs$
register. With few exceptions, all accesses to objects managed by
the TM system should use this type so that the CPU will translate the
reference to the right version of the object.
+\item [{Old~object~space:}] These pages are the ones that are really + shared between segments. They mostly contain old objects but also + some young ones that were too big to be allocated in the nursery. +\end{description} + + +\begin{figure*}[t] + \begin{centering} + \includegraphics[scale=0.8]{\string"segment layout\string".pdf} + \par\end{centering} + + \protect\caption{Segment Layout\label{fig:Segment-Layout}} +\end{figure*} + + + +\subsubsection{Assigning Segments} + +From the above setup it is clear that the number of segments is +statically set to some $N$. That means that at any point in time, a +maximum of $N$ threads and their transactions can be running in +parallel. To support an unlimited number of threads in applications +that use this TM system, we assign segments dynamically to threads. + +At the start of a transaction, the thread it is running in acquires a +segment. It may have to wait until another thread finishes its +transaction and releases a segment. Fairness is not guaranteed yet, as +we simply assume a fair scheduling policy in the operating system when +waiting on a condition variable. + +Therefore, a thread may be assigned to different segments each time it +starts a transaction. Although, we try to assign it the same segment +again if possible. + + + + +\subsubsection{Garbage Collection} + +Garbage collection plays a big role in our TM system. The GC is +generational and has two generations: the \emph{young} and the +\emph{old} generation. + +The \textbf{young generation}, where objects are considered to be +\emph{young} and reside in the \emph{Nursery}, is collected by +\emph{minor collections}. These collections move the surviving objects +out of the nursery into the old object space, which can be done +without stopping other threads. This is done either if the nursery has +no space left anymore or if we are committing the current +transaction. Consequently, all objects are old and the nursery empty +after a transaction commits. 
Furthermore, all objects in the nursery +were always created in the current transaction. This fact is useful +since we do not need to call any barrier on this kind of objects. + +To improve this situation even more, we introduce the concept of +\emph{overflow objects}. If a minor collection needs to occur during a +transaction, we empty the nursery and mark each surviving object in +the old object space with an \lstinline!overflow_number! globally +unique to the current transaction. That way we can still detect in a +medium-fast path inside barriers that the object still belongs to the +current transaction. + +The \textbf{old generation}, where objects are considered to be +\emph{old} and never move again, is collected by \emph{major + collections}. These collections are implemented in a stop-the-world +kind of way and first force minor collections in all threads. The +major goal is to free objects in the old objects space. Furthermore, +we optimistically re-share pages that do not need to be private +anymore. + +As seen in the API (section~\ref{sub:Application-Programming-Interfac}), +we use a \emph{shadow stack} in order to provide precise garbage +collection. Any time we call a function that possibly triggers a +collection, we need to save the objects that we need afterwards on the +shadow stack using \lstinline!STM_PUSH_ROOT()!. That way, they will +not be freed. And in case they were young, we get their new location +in the old object space when getting them back from the stack using +\lstinline!STM_POP_ROOT()!. + + + + +\subsubsection{Read Barrier} + +The point of the read barrier is to add the object to the read set of +the transaction. This information is needed to detect conflicts +between transactions. In other STM systems, it also resolves an object reference to +a private copy, but since the CPU performs our address translation on +every object access efficiently, we do not need to do that in our +barrier. 
+ +To add the object to the read set, for us it is enough to mark it as +read. Since this information needs to be local to the segment, we need +to store it in private pages. The area is called \emph{read markers +}and already mentioned in section \ref{sub:Setup}. This area can be +seen as a continuous array of bytes that is indexed from the start of +the segment by an object's reference ($SO$) divided by 16 (this +requires objects of at least 16 bytes in size). Instead of just +setting the byte to \lstinline!true! if the corresponding object was +read, we set it to a \lstinline!read_version! belonging to the +transaction, which will be incremented on each commit. Thereby, we +can avoid resetting the bytes to \lstinline!false! on commit and only +need to do this every 255 transactions. The whole code for the barrier +is easily optimizable for compilers as well as perfectly predictable +for CPUs: + +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void stm_read(SO): + *(SO >> 4) = read_version +\end{lstlisting} + + + +\subsubsection{Write Barrier} + +The job of the write barrier is twofold: first, it serves as a write +barrier for the garbage collector and second, it supports +copy-on-write and adds objects to the write set of the transaction. + +The \textbf{fast path} of the write barrier is very simple. We only +need to check for the flag \lstinline!WRITE_BARRIER! in the object's +header and call the slow path if it is set. This flag is set either if +the object is old and comes from an earlier transaction, or if there +was a minor collection which will add the flag again on all +objects. It is never set on freshly allocated objects that still +reside in the nursery. 
+ +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void stm_write(SO): + if SO->flags & WRITE_BARRIER: + write_slowpath(SO) +\end{lstlisting} + + +The \textbf{slow path} is shown here: + +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +void write_slowpath(SO): + // GC part: + list_append(to_trace, SO) + if is_overflow_obj(SO): + SO->flags &= ~WRITE_BARRIER + return + // STM part + stm_read(SO) + lock_idx = SO >> 4 + retry: + if write_locks[lock_idx] == our_num: + // we already own it + else if write_locks[lock_idx] == 0: + if cmp_and_swap(&write_locks[lock_idx], + 0, our_num): + list_append(modified_old_objects, SO) + privatize_pages(SO) + else: + goto retry + else: + w_w_contention_management() + goto retry + SO->flags &= ~WRITE_BARRIER +\end{lstlisting} + + +First comes the \emph{GC part}: In any case, the object will be added +to the list of objects that need tracing in the next minor collection +(\lstinline!to_trace!). This is necessary in case we write a +reference to it that points to a young object. We then need to trace +it during the next minor collection in order to mark the young object +alive and to update its reference to the new location it gets moved +to. The check for \lstinline!is_overflow_obj()! tells us if the +object was actually created in this transaction. In that case, we do +not need to execute the following \emph{TM part}. We especially do +not need to privatize the page since no other transaction knows about +these ``old'' objects. + +For TM, we first perform a read barrier on the object. We then try to +acquire its write lock. \lstinline!write_locks! again is a simple +global array of bytes that is indexed with the SO of the object +divided by 16. If we already own the lock, we are done. If someone +else owns the lock, we will do a write-write contention management +that will abort either us or the current owner of the object. 
If we +succeed in acquiring the lock using an atomic +\lstinline!cmp_and_swap!, we need to add the object to the write set +(a simple list called \lstinline!modified_old_objects!) and privatize +all pages belonging to it (copy-on-write). + +In all cases, we remove the \lstinline!WRITE_BARRIER! flag from the +object before we return. Thus, we never trigger the slow path again +before we do the next minor collection (also part of a commit) or we +start the next transaction. + + + + +\subsubsection{Abort} + +Aborting a transaction is rather easy. The first step is to reset the +nursery and all associated data structures. The second step is to go +over all objects in the write set (\lstinline!modified_old_objects!) +and reset any modifications in our private pages by copying from the +sharing-segment. What is left is to use \lstinline!longjmp()! to jump +back to the location initialized by a \lstinline!setjmp()! in +\lstinline!stm_start_transaction()!. Increasing the +\lstinline!read_version! is also done there. + + + + +\subsubsection{Commit} + +Committing a transaction needs a bit more work. First, we synchronize +all threads so that the committing one is the only one running and all +the others are waiting in a safe point. We then go through the write +set (\lstinline!modified_old_objects!) and check the corresponding +\lstinline!read_markers! in other threads/segments. If we detect a +read-write conflict, we do contention management to either abort us or +the other transaction, or to simply wait a bit (see \ref{subsub:contentionmanagement}). + +After verifying that there are no conflicts anymore, we copy all our +changes done to the objects in the write set to all other segments, +including the sharing-segment. This is safe since we synchronised all +threads. We also need to push overflow objects generated by minor +collections to other segments, since they may reside partially in +private pages. At that point we also get a new +\lstinline!overflow_number! 
by increasing a global one, so that it +stays globally unique for each transaction. Increasing the +\lstinline!read_version! is then done at the start of a new +transaction. + + + +\subsubsection{Thread Synchronization} + +A requirement for performing a commit is to synchronize all threads so +that we can safely update objects in other segments. To make this +synchronization fast and cheap, we do not want to insert an additional +check regularly in order to see if synchronization is requested. We +use a trick relying on the fact that dynamic languages are usually +very high-level and thus allocate a lot of objects very regularly. +This is done through the function \lstinline!stm_allocate! shown +below: + +\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +object_t *stm_allocate(ssize_t size_rounded): + result = nursery_current + nursery_current += size_rounded + if nursery_current > nursery_end: + return allocate_slowpath(size_rounded) + return result +\end{lstlisting} + + +This code does simple pointer-bump allocation in the nursery. If there +is still space left in the nursery, we return +\lstinline!nursery_current! and bump it up by +\lstinline!size_rounded!. The interesting part is the check +\lstinline!nursery_current > nursery_end! which will trigger the slow +path of the function to possibly perform a minor collection in order +to free up space in the nursery. + +If we want to synchronize all threads, we can rely on this check being +performed regularly. So what we do is to set the +\lstinline!nursery_end! to $0$ in all segments that we want to +synchronize. The mentioned check will then fail in those segments and +call the slow path. In \lstinline!allocate_slowpath! they can simply +check for this condition and enter a safe point. 
+ +For other synchronization requirements, for example: +\begin{itemize}[noitemsep] +\item waiting for a segment to be released, +\item waiting for a transaction to abort or commit, +\item waiting for all threads to reach their safe points, +\end{itemize} +we use a set of condition variables to wait or signal other threads. + + +\subsubsection{Contention Management\label{subsub:contentionmanagement}} + +On encountering conflicts, we employ contention management to solve +the problem as well as we can. The general rules are: +\begin{itemize}[noitemsep] +\item prefer transactions that started earlier to younger transactions +\item to support \emph{inevitable} transactions, we always prefer them + to others since they cannot abort +\end{itemize} +We can either simply abort a transaction to let the other one succeed, +or we can also wait until the other transaction committed. The latter +is an interesting option if we are trying to commit a write and +another transaction already read the object. We can then signal the +other transaction to commit as soon as possible and wait. After +waiting, there is now no conflict between our write and the already +committed read anymore. 
+ + + + +\section{Experimental Results} + +compare some programs between +\begin{itemize}[noitemsep] +\item pypy +\item pypy-jit +\item pypy-stm +\item pypy-stm-jit +\item cpython +\item jython +\end{itemize} + + + +\section{Related Work} + + +\section{Conclusions} %% \appendix diff --git a/talk/dls2014/paper/segment addressing.pdf b/talk/dls2014/paper/segment addressing.pdf new file mode 100644 index 0000000000000000000000000000000000000000..758d8d08cfa37fde50c8e7341d8b7513e4dd9117 GIT binary patch [cut] diff --git a/talk/dls2014/paper/segment layout.pdf b/talk/dls2014/paper/segment layout.pdf new file mode 100644 index 0000000000000000000000000000000000000000..688fddd705f33cbcfb62af968306a84d439ef817 GIT binary patch [cut] diff --git a/talk/dls2014/report/report.pdf b/talk/dls2014/report/report.pdf index e3b37b81068cc9a3c9f2746ca8e50c0d4deefd9f..b8ca91fce00e72adf22312b5ab8fff84db5ecd92 GIT binary patch [cut] diff --git a/talk/dls2014/report/report.tex b/talk/dls2014/report/report.tex --- a/talk/dls2014/report/report.tex +++ b/talk/dls2014/report/report.tex @@ -42,7 +42,7 @@ % Title. % ------ -\title{C7: Fast software transactional memory for dynamic languages} +\title{Virtual Memory Assisted Transactional Memory for Dynamic Languages} % % Single address. % --------------- @@ -225,7 +225,7 @@ \subsection{Synchronization} - cfbolz{citation again needed for the whole subsection} +\cfbolz{citation again needed for the whole subsection} It is well known that using locks to synchronize multiple threads is hard. They are non-composable, have overhead, may deadlock, limit @@ -250,7 +250,7 @@ \subsection{Transactional Memory Model} In this section, we describe the general model of our TM system. This -should clarify the general semantics using commonly used terms from +should clarify the general semantics in commonly used terms from the literature. 
\cfbolz{there is an overview paragraph of the idea missing, maybe in the introduction} @@ -283,21 +283,20 @@ \subsubsection{Semantics} -As required for TM systems, we guarantee complete \emph{isolation -}and \emph{atomicity} for transactions at all times. Furthermore, +As required for TM systems, we guarantee complete \emph{isolation} +and \emph{atomicity} for transactions at all times. Furthermore, the isolation provides full \emph{opacity} to always guarantee a consistent read set. -\cfbolz{this paragraph is hard to understand without giving an example (eg console printing) when it is useful} - -We support the notion of \emph{inevitable transactions} that are always -guaranteed to commit. There is always at most one such transaction -running in the system. We use this kind of transaction to provide -\emph{strong isolation} by running non-transactional code in the context -of inevitable transactions and to still provide the \emph{serializability} -of all transaction schedules. - - +To support irreversible operations that cannot be undone when we abort +a transaction (e.g. I/O, syscalls, and non-transactional code in +general), we employ \emph{irrevocable} or \emph{inevitable +transactions}. These transactions are always guaranteed to +commit. There is always at most one such transaction running in the +system, thus their execution is serialised. With this guarantee, +providing \emph{strong isolation} and \emph{serializability} between +non-transactional code is possible by making the current transaction +inevitable right before running irreversible operations. \subsubsection{Contention Management} @@ -334,21 +333,25 @@ every object that it can modify in complete isolation from other threads. -To get references to objects that are valid in all threads, we will -use \cfbolz{use for what?} the object's offset inside the segment. 
Since all segments are -copies of each other, the \emph{Segment Offset (SO)} will point to the -private version of an object in all threads/segments. To then +In order to reference these objects, we need references that are valid +in all threads and automatically point to the private copies. Since +an object's offset inside a segment is the same in all segments, we +can use this offset to reference objects. Because all segments are +copies of each other, this \emph{Segment Offset (SO)} points to the +private version of an object in all threads\,/\,segments. To then translate this SO to a real virtual memory address when used inside a thread, we need to add the thread's segment start address to the SO. The result of this operation is called a \emph{Linear Address (LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. -\cfbolz{here it needs to say that this is x86 specific} - -To make this address translation efficient, we use the segment -register $\%gs$. When this register points to a thread's segment start +x86-CPUs provide a feature called \emph{memory segmentation}. It +performs this translation from a SO to a LA directly in hardware. We +can use the segment register $\%gs$, which is mostly unused in current +applications. When this register points to a thread's segment start address, we can instruct the CPU to perform the above translation from -a reference of the form $\%gs{::}SO$ to the right LA on its own. +a reference of the form $\%gs{::}SO$ to the right LA on its own. This +process is efficient enough that we can do it on every access to an +object. In summary, we can use a single SO to reference the same object in all threads, and it will be translated by the CPU to a LA that always @@ -489,12 +492,16 @@ transaction waiting or aborting.\\ We then push all changes of modified objects in private pages to all the pages in other segments, including the sharing-segment (segment - 0). 
\cfbolz{can it really happen that you push pages to other segments? I thought it's always just back to the sharing segment} + 0). \item [{Abort:}] On abort the transaction will forget about all the changes it has done. All objects in the write set are reset by copying their previous version from the sharing-segment into the - private pages of the aborting transaction. - \cfbolz{why doing any copying? aren't the pages re-shared instead?} + private pages of the aborting transaction.\\ + Re-sharing these pages instead of resetting the changes would not + only be slower because it involves a system call, it is also very + likely the page would get privatised again immediately after re-trying + the transaction. Since this again involves a full page copy, + resetting should be faster than re-sharing. \end{description} \cfbolz{random question: did we investigate the extra memory requirements? we should characterize memory overhead somewhere, eg at least one byte per object for the read markers} @@ -639,11 +646,10 @@ \subsubsection{Garbage Collection} Garbage collection plays a big role in our TM system. The GC is -generational and has two generations. +generational and has two generations: the \emph{young} and the +\emph{old} generation. -\cfbolz{maybe use "young" and "old" generation, if there are only two} - -The \textbf{first generation}, where objects are considered to be +The \textbf{young generation}, where objects are considered to be \emph{young} and reside in the \emph{Nursery}, is collected by \emph{minor collections}. These collections move the surviving objects out of the nursery into the old object space, which can be done @@ -662,7 +668,7 @@ medium-fast path inside barriers that the object still belongs to the current transaction. -The \textbf{second generation}, where objects are considered to be +The \textbf{old generation}, where objects are considered to be \emph{old} and never move again, is collected by \emph{major collections}. 
These collections are implemented in a stop-the-world kind of way and first force minor collections in all threads. The @@ -816,11 +822,11 @@ set (\lstinline!modified_old_objects!) and check the corresponding \lstinline!read_markers! in other threads/segments. If we detect a read-write conflict, we do contention management to either abort us or -the other transaction, or to simply wait a bit. \cfbolz{why does waiting help?} +the other transaction, or to simply wait a bit (see \ref{subsub:contentionmanagement}). After verifying that there are no conflicts anymore, we copy all our changes done to the objects in the write set to all other segments, -including the sharing-segment. This is safe since we synchronized all +including the sharing-segment. This is safe since we synchronised all threads. We also need to push overflow objects generated by minor collections to other segments, since they may reside partially in private pages. At that point we also get a new @@ -831,11 +837,8 @@ - \subsubsection{Thread Synchronization} - - A requirement for performing a commit is to synchronize all threads so that we can safely update objects in other segments. To make this synchronization fast and cheap, we do not want to insert an additional @@ -879,7 +882,7 @@ we use a set of condition variables to wait or signal other threads. -\subsubsection{Contention Management} +\subsubsection{Contention Management\label{subsub:contentionmanagement}} On encountering conflicts, we employ contention management to solve the problem as well as we can. 
The general rules are: From noreply at buildbot.pypy.org Thu May 15 16:48:28 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 May 2014 16:48:28 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: paper updates Message-ID: <20140515144828.2230C1C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5251:46bc3a8000ea Date: 2014-05-15 16:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/46bc3a8000ea/ Log: paper updates diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -142,7 +142,7 @@ good performance that enables new applications. However, a parallel programming model was not part of the design of those languages. Thus, the reference implementations of e.g. Python and Ruby use a single, -global interpreter lock (GIL) to serialize the execution of code in +global interpreter lock (GIL) to serialise the execution of code in threads. While this GIL prevents any parallelism from occurring, it also @@ -151,18 +151,23 @@ in-between such instructions, it provides perfect isolation and atomicity between multiple threads for a series of instructions. Another technology that can provide the same guarantees -is transactional memory (TM). +is transactional memory (TM). \remi{cite our position paper} There have been several attempts at replacing the GIL with TM. Using transactions to enclose multiple bytecode instructions, we can get the very same semantics as the GIL while possibly executing several transactions in parallel. Furthermore, by exposing these interpreter-level transactions to the application in the form of -\emph{atomic blocks}, we give dynamic languages a new synchronization +\emph{atomic blocks}, we give dynamic languages a new synchronisation mechanism that avoids several of the problems of locks as they are used now. 
-\cfbolz{the above is good, here is something missing: problems with current STM approaches, outlining the intuition behind the new one} +\remi{cite and extract from (our pos. paper):} +TM systems come in can be broadly categorised as hardware based (HTM), +software based (STM), or hybrid systems (HyTM). HTM systems are limited +by hardware constraints, while STM systems have a lot of overhead. +In this paper, we describe how we manage to lower the overhead of our +STM system so that it can be seen as a viable replacement for the GIL. Our contributions include: \begin{itemize}[noitemsep] @@ -173,7 +178,7 @@ \item This new STM system is used to replace the GIL in Python and is then evaluated extensively. \item We introduce atomic blocks to the Python language to provide a - backwards compatible, composable synchronization mechanism for + backwards compatible, composable synchronisation mechanism for threads. \end{itemize} @@ -187,19 +192,19 @@ Transactional memory (TM) is a concurrency control mechanism that comes from database systems. Using transactions, we can group a series of instructions performing operations on memory and make them happen -atomically and in complete isolations from other +atomically and in complete isolation from other transactions. \emph{Atomicity} means that all these instructions in -the transaction and their effects seem to happen at one, undividable +the transaction and their effects seem to happen at one, indivisible point in time. Other transactions never see inconsistent state of a partially executed transaction which is called \emph{isolation}. If we start multiple such transactions in multiple threads, the TM system guarantees that the outcome of running the transactions is -\emph{serializable}. Meaning, the outcome is equal to some sequential +\emph{serialisable}. Meaning, the outcome is equal to some sequential execution of these transactions. 
This means that the approach provides the same semantics as using the GIL while still allowing the TM system to -run transactions in parallel as an optimization. +run transactions in parallel as an optimisation. \subsection{Python} @@ -239,15 +244,15 @@ spots. We will compare our work with Jython for evaluation. -\subsection{Synchronization} +\subsection{Synchronisation} \cfbolz{citation again needed for the whole subsection} -It is well known that using locks to synchronize multiple threads is +It is well known that using locks to synchronise multiple threads is hard. They are non-composable, have overhead, may deadlock, limit scalability, and overall add a lot of complexity. For a better parallel programming model for dynamic languages, we want to implement -another, well-known synchronization mechanism: \emph{atomic blocks}. +another, well-known synchronisation mechanism: \emph{atomic blocks}. Atomic blocks are composable, deadlock-free, higher-level and expose useful atomicity and isolation guarantees to the application for a @@ -265,69 +270,61 @@ \subsection{Transactional Memory Model} -In this section, we describe the general model of our TM system. This -should clarify the general semantics in commonly used terms from -the literature. +In this section, we characterise the model of our TM system and its +guarantees as well as some of the design choices we made. This should +clarify the general semantics in commonly used terms from the +literature.\remi{cite Transactional Memory 2nd edition} -\cfbolz{there is an overview paragraph of the idea missing, maybe in the introduction} - -\cfbolz{this all feels very much dumping details, needs more overview. why is this info important? the subsubsections don't have any connections} +Our TM system is fully implemented in software. However, we do exploit +some more advanced features of current CPUs, particularly \emph{memory +segmentation, virtual memory,} and the 64-bit address space. 
Still, +it cannot be classified as a hybrid TM system since it currently +makes no use of any HTM present in the CPU. \subsubsection{Conflict Handling} +We implement an object-based TM system, thus it makes sense to detect +conflicts with \emph{object granularity}. With this choice, if two +transactions access the same object and at least one access is a +write, we count it as a conflict. Conceptually, it is based on +\emph{read} and \emph{write sets} of transactions. Reading from an +object adds the object to the read set, writing to it adds it to both +sets. Two transactions conflict if they have accessed a common object +that is in the write set of at least one of them. -Our conflict detection works with \emph{object - granularity}. Conceptually, it is based on \emph{read} and -\emph{write sets} of transactions. Two transactions conflict if they -have accessed a common object that is now in the write set of at least -one of them. - -The \emph{concurrency control} works partly \emph{optimistically} for -reading of objects, where conflicts caused by just reading an object -in transactions are detected only when the transaction that writes the -object actually commits. For write-write conflicts we are currently +The detection, or \emph{concurrency control}, works partly +\emph{optimistically} for reading objects. Read-write conflicts +between two transactions are detected in both exactly at the time when +the writing one commits. For write-write conflicts we are currently \emph{pessimistic}: Only one transaction may have a certain object in its write set at any point in time, others trying to write to it will -have to wait or abort. +have to wait or abort. This decision needs to be evaluated further +in the future. -We use \emph{lazy version management} to ensure that modifications by -a transaction are not visible to another transaction before the former -commits. 
- - - +When a conflict is detected, we perform some simple contention +management that generally prefers the older transaction to the younger. \subsubsection{Semantics} -As required for TM systems, we guarantee complete \emph{isolation} -and \emph{atomicity} for transactions at all times. Furthermore, -the isolation provides full \emph{opacity} to always guarantee a consistent -read set. +As required for TM systems, we guarantee complete \emph{isolation} and +\emph{atomicity} for transactions at all times. Our method of choice +is \emph{lazy version management}. Modifications by a transaction are +not visible to another transaction before the former commits. +Furthermore, the isolation provides full \emph{opacity} to always +guarantee a consistent read set even for non-committed transactions. +\remi{cite On the Correctness of Transactional Memory} -To support irreversible operations that cannot be undone when we abort -a transaction (e.g. I/O, syscalls, and non-transactional code in -general), we employ \emph{irrevocable} or \emph{inevitable -transactions}. These transactions are always guaranteed to -commit. There is always at most one such transaction running in the -system, thus their execution is serialised. With this guarantee, -providing \emph{strong isolation} and \emph{serializability} between -non-transactional code is possible by making the current transaction -inevitable right before running irreversible operations. - - -\subsubsection{Contention Management} - -When a conflict is detected, we perform some simple contention -management. First, inevitable transactions always win. Second, the -older transaction wins. Different schemes are possible. - - -\subsubsection{Software Transactional Memory} - -Generally speaking, the system is fully implemented in -software. However, we exploit some more advanced features of current -CPUs, especially \emph{memory segmentation, virtual memory,} and the -64-bit address space. 
+To also support these properties for irreversible operations that +cannot be undone when we abort a transaction (e.g. I/O, syscalls, and +non-transactional code in general), we use \emph{irrevocable} or +\emph{inevitable transactions}. These transactions are always +guaranteed to commit, which is why they always have to win in case +there is a conflict with another, normal transaction. There is always +at most one such transaction running in the system, thus their +execution is serialised. With this guarantee, providing \emph{strong +isolation} and \emph{serialisability} between non-transactional code +is possible by making the current transaction inevitable right before +running irreversible operations. \subsection{Implementation} @@ -420,7 +417,7 @@ the segments $>0$ to the file pages of our sharing-segment. This is the fully-shared configuration. -During runtime, we can then privatize single pages in segments $>0$ +During runtime, we can then privatise single pages in segments $>0$ again by remapping single pages as seen in (III). Looking back at address translation for object references, we see now @@ -428,7 +425,7 @@ translated to different linear addresses in different threads by the CPU. Then, depending on the current mapping of virtual pages to file pages, these LAs can map to a single file page in the sharing-segment, -or to privatized file pages in the corresponding segments. This +or to privatised file pages in the corresponding segments. This mapping is also performed efficiently by the CPU and can easily be done on every access to an object. @@ -444,7 +441,7 @@ \par\end{centering} \protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to - segment 0, fully shared memory configuration. (III) privatize single + segment 0, fully shared memory configuration. 
(III) privatise single pages.\label{fig:Page-Remapping}} \end{figure} @@ -459,7 +456,7 @@ object without other threads seeing the changes immediately, we ensure that all pages belonging to the object are private to our segment. -To detect when to privatize pages, we use write barriers before every +To detect when to privatise pages, we use write barriers before every write. When the barrier detects that the object is not in a private page (or any pages that belong to the object), we remap and copy the pages to the thread's segment. From now on, the translation of @@ -500,7 +497,7 @@ visible on commit. We also need to be able to completely abort a transaction without a trace, like it never happened. \begin{description} -\item [{Commit:}] If a transaction commits, we synchronize all threads +\item [{Commit:}] If a transaction commits, we synchronise all threads so that all of them are waiting in a safe point. In the committing transaction, we go through all objects in the write set and check if another transaction in a different segment read the same object. @@ -520,13 +517,13 @@ resetting should be faster than re-sharing. \end{description} -\cfbolz{random question: did we investigate the extra memory requirements? we should characterize memory overhead somewhere, eg at least one byte per object for the read markers} +\cfbolz{random question: did we investigate the extra memory requirements? we should characterise memory overhead somewhere, eg at least one byte per object for the read markers} \subsubsection{Summary} -We provide isolation between transactions by privatizing the pages of +We provide isolation between transactions by privatising the pages of the segments belonging to the threads the transactions run in. To -detect when and which pages need privatization, we use write barriers +detect when and which pages need privatisation, we use write barriers that trigger a COW of one or several pages. 
Conflicts, however, are detected on the level of objects; based on the concept of read and write sets. Barriers before reading and writing add objects to the @@ -605,7 +602,7 @@ of segments $>0$ map to the pages of the sharing-segment. However, the layout of a segment is not uniform and we actually -privatize a few areas again right away. These areas are illustrated in +privatise a few areas again right away. These areas are illustrated in Figure \ref{fig:Segment-Layout} and explained here: \begin{description}[noitemsep] \item [{NULL~page:}] This page is unmapped and will produce a @@ -725,10 +722,10 @@ transaction, which will be incremented on each commit. Thereby, we can avoid resetting the bytes to \lstinline!false! on commit and only need to do this every 255 transactions. The whole code for the barrier -is easily optimizable for compilers as well as perfectly predictable +is easily optimisable for compilers as well as perfectly predictable for CPUs: -\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting} void stm_read(SO): *(SO >> 4) = read_version \end{lstlisting} @@ -749,7 +746,7 @@ objects. It is never set on freshly allocated objects that still reside in the nursery. -\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting} void stm_write(SO): if SO->flags & WRITE_BARRIER: write_slowpath(SO) @@ -758,7 +755,7 @@ The \textbf{slow path} is shown here: -\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting} void write_slowpath(SO): // GC part: list_append(to_trace, SO) @@ -794,7 +791,7 @@ to. The check for \lstinline!is_overflow_obj()! tells us if the object was actually created in this transaction. In that case, we do not need to execute the following \emph{TM part}. We especially do -not need to privatize the page since no other transaction knows about +not need to privatise the page since no other transaction knows about these ``old'' objects. 
For TM, we first perform a read barrier on the object. We then try to @@ -805,7 +802,7 @@ that will abort either us or the current owner of the object. If we succeed in acquiring the lock using an atomic \lstinline!cmp_and_swap!, we need to add the object to the write set -(a simple list called \lstinline!modified_old_objects!) and privatize +(a simple list called \lstinline!modified_old_objects!) and privatise all pages belonging to it (copy-on-write). In all cases, we remove the \lstinline!WRITE_BARRIER! flag from the @@ -823,7 +820,7 @@ over all objects in the write set (\lstinline!modified_old_objects!) and reset any modifications in our private pages by copying from the sharing-segment. What is left is to use \lstinline!longjmp()! to jump -back to the location initialized by a \lstinline!setjmp()! in +back to the location initialised by a \lstinline!setjmp()! in \lstinline!stm_start_transaction()!. Increasing the \lstinline!read_version! is also done there. @@ -832,7 +829,7 @@ \subsubsection{Commit} -Committing a transaction needs a bit more work. First, we synchronize +Committing a transaction needs a bit more work. First, we synchronise all threads so that the committing one is the only one running and all the others are waiting in a safe point. We then go through the write set (\lstinline!modified_old_objects!) and check the corresponding @@ -853,18 +850,18 @@ -\subsubsection{Thread Synchronization} +\subsubsection{Thread Synchronisation} -A requirement for performing a commit is to synchronize all threads so +A requirement for performing a commit is to synchronise all threads so that we can safely update objects in other segments. To make this -synchronization fast and cheap, we do not want to insert an additional -check regularly in order to see if synchronization is requested. We +synchronisation fast and cheap, we do not want to insert an additional +check regularly in order to see if synchronisation is requested. 
We use a trick relying on the fact that dynamic languages are usually very high-level and thus allocate a lot of objects very regularly. This is done through the function \lstinline!stm_allocate! shown below: -\begin{lstlisting}[basicstyle={\footnotesize\ttfamily},tabsize=4] +\begin{lstlisting} object_t *stm_allocate(ssize_t size_rounded): result = nursery_current nursery_current += size_rounded @@ -882,14 +879,14 @@ path of the function to possibly perform a minor collection in order to free up space in the nursery. -If we want to synchronize all threads, we can rely on this check being +If we want to synchronise all threads, we can rely on this check being performed regularly. So what we do is to set the \lstinline!nursery_end! to $0$ in all segments that we want to -synchronize. The mentioned check will then fail in those segments and +synchronise. The mentioned check will then fail in those segments and call the slow path. In \lstinline!allocate_slowpath! they can simply check for this condition and enter a safe point. -For other synchronization requirements, for example: +For other synchronisation requirements, for example: \begin{itemize}[noitemsep] \item waiting for a segment to be released, \item waiting for a transaction to abort or commit, From noreply at buildbot.pypy.org Thu May 15 17:05:28 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 15 May 2014 17:05:28 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some missing points Message-ID: <20140515150528.171D51C02D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5252:1d2165d4f2df Date: 2014-05-15 17:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/1d2165d4f2df/ Log: add some missing points diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -350,15 +350,15 @@ in all threads and automatically point to the private copies. 
Since an object's offset inside a segment is the same in all segments, we can use this offset to reference objects. Because all segments are -copies of each other, this \emph{Segment Offset (SO)} points to the +copies of each other, this \emph{Segment Offset ($SO$)} points to the private version of an object in all threads\,/\,segments. To then -translate this SO to a real virtual memory address when used inside a +translate this $SO$ to a real virtual memory address when used inside a thread, we need to add the thread's segment start address to the -SO. The result of this operation is called a \emph{Linear Address +$SO$. The result of this operation is called a \emph{Linear Address (LA)}. This is illustrated in Figure \ref{fig:Segment-Addressing}. x86-CPUs provide a feature called \emph{memory segmentation}. It -performs this translation from a SO to a LA directly in hardware. We +performs this translation from a $SO$ to a LA directly in hardware. We can use the segment register $\%gs$, which is mostly unused in current applications. When this register points to a thread's segment start address, we can instruct the CPU to perform the above translation from @@ -366,7 +366,7 @@ process is efficient enough that we can do it on every access to an object. -In summary, we can use a single SO to reference the same object in all +In summary, we can use a single $SO$ to reference the same object in all threads, and it will be translated by the CPU to a LA that always points to the thread's private version of this object. Thereby, threads are fully isolated from each other. However, $N$ segments @@ -461,7 +461,7 @@ page (or any pages that belong to the object), we remap and copy the pages to the thread's segment. From now on, the translation of $\%gs{::}SO$ in this particular segment will resolve to the private -version of the object. Note, the SO used to reference the object does +version of the object. 
Note, the $SO$ used to reference the object does not change during that process. @@ -609,7 +609,7 @@ segmentation violation when accessed. We use this to detect erroneous dereferencing of \lstinline!NULL! references. All $\%gs{::}SO$ translated to linear addresses will point to NULL pages - if SO is set to \lstinline!NULL!. + if $SO$ is set to \lstinline!NULL!. \item [{Segment-local~data:}] Some area private to the segment that contains segment-local information. \item [{Read~markers:}] These are pages that store information about @@ -712,18 +712,19 @@ To add the object to the read set, for us it is enough to mark it as read. Since this information needs to be local to the segment, we need -to store it in private pages. The area is called \emph{read markers -}and already mentioned in section \ref{sub:Setup}. This area can be -seen as a continuous array of bytes that is indexed from the start of -the segment by an object's reference ($SO$) divided by 16 (this -requires objects of at least 16 bytes in size). Instead of just -setting the byte to \lstinline!true! if the corresponding object was -read, we set it to a \lstinline!read_version! belonging to the -transaction, which will be incremented on each commit. Thereby, we -can avoid resetting the bytes to \lstinline!false! on commit and only -need to do this every 255 transactions. The whole code for the barrier -is easily optimisable for compilers as well as perfectly predictable -for CPUs: +to store it in private pages. The area is called \emph{read markers} +and already mentioned in section \ref{sub:Setup}. + +This area can be seen as a continuous array of bytes that is indexed +from the start of the segment by an object's reference ($SO$) divided +by 16 (this is where the requirement of objects to be of at least 16 +bytes in size comes from). Instead of just setting the byte to +\lstinline!true! if the corresponding object was read, we set it to a +\lstinline!read_version! 
belonging to the transaction, which will be +incremented on each commit. Thereby, we can avoid resetting the bytes +to \lstinline!false! on commit and only need to do this every 255 +transactions. The whole code for the barrier is easily optimisable for +compilers as well as perfectly predictable for CPUs: \begin{lstlisting} void stm_read(SO): @@ -796,7 +797,7 @@ For TM, we first perform a read barrier on the object. We then try to acquire its write lock. \lstinline!write_locks! again is a simple -global array of bytes that is indexed with the SO of the object +global array of bytes that is indexed with the $SO$ of the object divided by 16. If we already own the lock, we are done. If someone else owns the lock, we will do a write-write contention management that will abort either us or the current owner of the object. If we @@ -914,10 +915,37 @@ +\section{Evaluation} -\section{Experimental Results} +\subsection{Memory Requirements} -compare some programs between +\begin{itemize} +\item stm\_flags per object +\item read markers and other sections +\item private pages +\end{itemize} + +maybe some memory usage graph over time + + +\subsection{Overhead Breakdown} + +\begin{itemize} +\item time taken by read \& write barriers +\item time spent committing \& aborting (maybe with different numbers + of threads) +\item time in GC +\end{itemize} + + +\subsection{Scaling} + +maybe some simple micro benchmarks with adaptable conflict rate + + +\subsection{Real-World Benchmarks} + +more real benchmarks comparing multiple implementations: \begin{itemize}[noitemsep] \item pypy \item pypy-jit @@ -928,7 +956,6 @@ \end{itemize} - \section{Related Work} From noreply at buildbot.pypy.org Thu May 15 17:33:36 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 May 2014 17:33:36 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_shelve Message-ID: <20140515153336.1C0791C3306@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71530:e0db2ffd444e 
Date: 2014-05-15 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/e0db2ffd444e/ Log: fix test_shelve diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -53,6 +53,7 @@ def __init__(self, filename, iflags, mode): res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 if not res: self._raise_from_errno() self.ll_dbm = res @@ -67,8 +68,14 @@ raise error(os.strerror(ffi.errno)) raise error(lib.gdbm_strerror(lib.gdbm_errno)) + def __len__(self): + if self.size < 0: + self.size = len(self.keys()) + return self.size + def __setitem__(self, key, value): self._check_closed() + self._size = -1 r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), lib.GDBM_REPLACE) if r < 0: From noreply at buildbot.pypy.org Thu May 15 17:35:31 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 May 2014 17:35:31 +0200 (CEST) Subject: [pypy-commit] pypy default: ideally fix release for gdbm Message-ID: <20140515153531.23C791C3306@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r71531:4905ee88f2aa Date: 2014-05-15 17:34 +0200 http://bitbucket.org/pypy/pypy/changeset/4905ee88f2aa/ Log: ideally fix release for gdbm diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -74,6 +74,7 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + subprocess.check_call([str(pypy_c), '-c', 'import gdbm']) if not withouttk: try: subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) From noreply at buildbot.pypy.org Thu May 15 17:47:34 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 May 2014 17:47:34 +0200 (CEST) Subject: [pypy-commit] pypy default: move low-level stuff out of the annotator Message-ID: <20140515154734.A8F191C02D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: 
Changeset: r71532:7d6363bb626f Date: 2014-05-15 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/7d6363bb626f/ Log: move low-level stuff out of the annotator diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -8,15 +8,14 @@ SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) -from rpython.rtyper.llannotation import ( - SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) from rpython.annotator.bookkeeper import ( - getbookkeeper, immutablevalue, BUILTIN_ANALYZERS, analyzer_for) + getbookkeeper, immutablevalue, BUILTIN_ANALYZERS, analyzer_for) from rpython.annotator import description from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel + def constpropagate(func, args_s, s_result): """Returns s_result unless all args are constants, in which case the func() is called and a constant result is returned (it must be contained @@ -321,6 +320,7 @@ @analyzer_for(rpython.rlib.objectmodel.hlinvoke) def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s): + from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError @@ -343,25 +343,6 @@ def robjmodel_keepalive_until_here(*args_s): return immutablevalue(None) - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) -def llmemory_cast_ptr_to_adr(s): - from rpython.rtyper.llannotation import SomeInteriorPtr - assert not isinstance(s, SomeInteriorPtr) - return SomeAddress() - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr) -def llmemory_cast_adr_to_ptr(s, s_type): - assert s_type.is_constant() - return SomePtr(s_type.const) - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int) 
-def llmemory_cast_adr_to_int(s, s_mode=None): - return SomeInteger() # xxx - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr) -def llmemory_cast_int_to_adr(s): - return SomeAddress() - try: import unicodedata except ImportError: @@ -375,131 +356,6 @@ def analyze(): return SomeOrderedDict(getbookkeeper().getdictdef()) - - -# annotation of low-level types -from rpython.rtyper.llannotation import SomePtr -from rpython.rtyper.lltypesystem import lltype - - at analyzer_for(lltype.malloc) -def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, - s_add_memory_pressure=None): - assert (s_n is None or s_n.knowntype == int - or issubclass(s_n.knowntype, rpython.rlib.rarithmetic.base_int)) - assert s_T.is_constant() - if s_n is not None: - n = 1 - else: - n = None - if s_zero: - assert s_zero.is_constant() - if s_flavor is None: - p = lltype.malloc(s_T.const, n) - r = SomePtr(lltype.typeOf(p)) - else: - assert s_flavor.is_constant() - assert s_track_allocation is None or s_track_allocation.is_constant() - assert (s_add_memory_pressure is None or - s_add_memory_pressure.is_constant()) - # not sure how to call malloc() for the example 'p' in the - # presence of s_extraargs - r = SomePtr(lltype.Ptr(s_T.const)) - return r - - at analyzer_for(lltype.free) -def free(s_p, s_flavor, s_track_allocation=None): - assert s_flavor.is_constant() - assert s_track_allocation is None or s_track_allocation.is_constant() - # same problem as in malloc(): some flavors are not easy to - # malloc-by-example - #T = s_p.ll_ptrtype.TO - #p = lltype.malloc(T, flavor=s_flavor.const) - #lltype.free(p, flavor=s_flavor.const) - - at analyzer_for(lltype.render_immortal) -def render_immortal(s_p, s_track_allocation=None): - assert s_track_allocation is None or s_track_allocation.is_constant() - - at analyzer_for(lltype.typeOf) -def typeOf(s_val): - lltype = annotation_to_lltype(s_val, info="in typeOf(): ") - return immutablevalue(lltype) - - at 
analyzer_for(lltype.cast_primitive) -def cast_primitive(T, s_v): - assert T.is_constant() - return ll_to_annotation(lltype.cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) - - at analyzer_for(lltype.nullptr) -def nullptr(T): - assert T.is_constant() - p = lltype.nullptr(T.const) - return immutablevalue(p) - - at analyzer_for(lltype.cast_pointer) -def cast_pointer(PtrT, s_p): - assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p - assert PtrT.is_constant() - cast_p = lltype.cast_pointer(PtrT.const, s_p.ll_ptrtype._defl()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.cast_opaque_ptr) -def cast_opaque_ptr(PtrT, s_p): - assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p - assert PtrT.is_constant() - cast_p = lltype.cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_fieldptr) -def direct_fieldptr(s_p, s_fieldname): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - assert s_fieldname.is_constant() - cast_p = lltype.direct_fieldptr(s_p.ll_ptrtype._example(), - s_fieldname.const) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_arrayitems) -def direct_arrayitems(s_p): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - cast_p = lltype.direct_arrayitems(s_p.ll_ptrtype._example()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_ptradd) -def direct_ptradd(s_p, s_n): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - # don't bother with an example here: the resulting pointer is the same - return s_p - - at analyzer_for(lltype.cast_ptr_to_int) -def cast_ptr_to_int(s_ptr): # xxx - return SomeInteger() - - at analyzer_for(lltype.cast_int_to_ptr) -def cast_int_to_ptr(PtrT, s_int): - assert PtrT.is_constant() - return SomePtr(ll_ptrtype=PtrT.const) - - at 
analyzer_for(lltype.identityhash) -def identityhash(s_obj): - assert isinstance(s_obj, SomePtr) - return SomeInteger() - - at analyzer_for(lltype.getRuntimeTypeInfo) -def getRuntimeTypeInfo(T): - assert T.is_constant() - return immutablevalue(lltype.getRuntimeTypeInfo(T.const)) - - at analyzer_for(lltype.runtime_type_info) -def runtime_type_info(s_p): - assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p - return SomePtr(lltype.typeOf(lltype.runtime_type_info(s_p.ll_ptrtype._example()))) - - at analyzer_for(lltype.Ptr) -def constPtr(T): - assert T.is_constant() - return immutablevalue(lltype.Ptr(T.const)) - - #________________________________ # weakrefs @@ -514,88 +370,9 @@ "a weakref to cannot be None") return SomeWeakRef(s_obj.classdef) - -from rpython.rtyper.lltypesystem import llmemory - - at analyzer_for(llmemory.weakref_create) -def llweakref_create(s_obj): - if (not isinstance(s_obj, SomePtr) or - s_obj.ll_ptrtype.TO._gckind != 'gc'): - raise Exception("bad type for argument to weakref_create(): %r" % ( - s_obj,)) - return SomePtr(llmemory.WeakRefPtr) - - at analyzer_for(llmemory.weakref_deref ) -def llweakref_deref(s_ptrtype, s_wref): - if not (s_ptrtype.is_constant() and - isinstance(s_ptrtype.const, lltype.Ptr) and - s_ptrtype.const.TO._gckind == 'gc'): - raise Exception("weakref_deref() arg 1 must be a constant " - "ptr type, got %s" % (s_ptrtype,)) - if not (isinstance(s_wref, SomePtr) and - s_wref.ll_ptrtype == llmemory.WeakRefPtr): - raise Exception("weakref_deref() arg 2 must be a WeakRefPtr, " - "got %s" % (s_wref,)) - return SomePtr(s_ptrtype.const) - - at analyzer_for(llmemory.cast_ptr_to_weakrefptr) -def llcast_ptr_to_weakrefptr(s_ptr): - assert isinstance(s_ptr, SomePtr) - return SomePtr(llmemory.WeakRefPtr) - - at analyzer_for(llmemory.cast_weakrefptr_to_ptr) -def llcast_weakrefptr_to_ptr(s_ptrtype, s_wref): - if not (s_ptrtype.is_constant() and - isinstance(s_ptrtype.const, lltype.Ptr)): - raise 
Exception("cast_weakrefptr_to_ptr() arg 1 must be a constant " - "ptr type, got %s" % (s_ptrtype,)) - if not (isinstance(s_wref, SomePtr) and - s_wref.ll_ptrtype == llmemory.WeakRefPtr): - raise Exception("cast_weakrefptr_to_ptr() arg 2 must be a WeakRefPtr, " - "got %s" % (s_wref,)) - return SomePtr(s_ptrtype.const) - #________________________________ # non-gc objects @analyzer_for(rpython.rlib.objectmodel.free_non_gc_object) def robjmodel_free_non_gc_object(obj): pass - - -#_________________________________ -# memory address - - at analyzer_for(llmemory.raw_malloc) -def raw_malloc(s_size): - assert isinstance(s_size, SomeInteger) #XXX add noneg...? - return SomeAddress() - - at analyzer_for(llmemory.raw_malloc_usage) -def raw_malloc_usage(s_size): - assert isinstance(s_size, SomeInteger) #XXX add noneg...? - return SomeInteger(nonneg=True) - - at analyzer_for(llmemory.raw_free) -def raw_free(s_addr): - assert isinstance(s_addr, SomeAddress) - - at analyzer_for(llmemory.raw_memclear) -def raw_memclear(s_addr, s_int): - assert isinstance(s_addr, SomeAddress) - assert isinstance(s_int, SomeInteger) - - at analyzer_for(llmemory.raw_memcopy) -def raw_memcopy(s_addr1, s_addr2, s_int): - assert isinstance(s_addr1, SomeAddress) - assert isinstance(s_addr2, SomeAddress) - assert isinstance(s_int, SomeInteger) #XXX add noneg...? 
- - -#_________________________________ -# offsetof/sizeof - - - at analyzer_for(llmemory.offsetof) -def offsetof(TYPE, fldname): - return SomeInteger() diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -6,9 +6,9 @@ SomeBool, SomeInteger, SomeString, SomeFloat, SomeList, SomeDict, s_None, SomeObject, SomeInstance, SomeTuple, unionof, SomeUnicodeString, SomeType, AnnotatorError) -from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.listdef import ListDef from rpython.annotator.dictdef import DictDef +from rpython.rtyper import extregistry _annotation_cache = {} @@ -40,7 +40,7 @@ def _compute_annotation(t, bookkeeper=None): from rpython.rtyper.lltypesystem import lltype - from rpython.rtyper import extregistry + from rpython.rtyper.llannotation import lltype_to_annotation if isinstance(t, SomeObject): return t elif isinstance(t, lltype.LowLevelType): diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,45 +1,15 @@ """ Code for annotating low-level thingies. 
""" -from types import MethodType from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, - SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, - s_None, s_Bool, UnionError, AnnotatorError, SomeBool) + SomeUnicodeCodePoint, SomeInteger, SomeImpossibleValue, + s_None, s_Bool, UnionError, AnnotatorError) from rpython.rtyper.lltypesystem import lltype, llmemory - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - def getattr(self, s_attr): - assert s_attr.is_constant() - assert isinstance(s_attr, SomeString) - assert s_attr.const in llmemory.supported_access_types - return SomeTypedAddressAccess( - llmemory.supported_access_types[s_attr.const]) - getattr.can_only_throw = [] - - def bool(self): - return s_Bool - -class SomeTypedAddressAccess(SomeObject): - """This class is used to annotate the intermediate value that - appears in expressions of the form: - addr.signed[offset] and addr.signed[offset] = value - """ - - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False +from rpython.rtyper.lltypesystem.lltype import SomePtr +from rpython.rtyper.lltypesystem.llmemory import ( + SomeAddress, SomeTypedAddressAccess) class __extend__(pairtype(SomeAddress, SomeAddress)): @@ -98,69 +68,6 @@ raise UnionError(s_obj, s_addr) -class SomePtr(SomeObject): - knowntype = lltype._ptr - immutable = True - - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - def getattr(self, s_attr): - from rpython.annotator.bookkeeper import getbookkeeper - if not s_attr.is_constant(): - raise AnnotatorError("getattr on ptr %r with non-constant " - "field-name" % self.ll_ptrtype) - example = self.ll_ptrtype._example() - try: 
- v = example._lookup_adtmeth(s_attr.const) - except AttributeError: - v = getattr(example, s_attr.const) - return ll_to_annotation(v) - else: - if isinstance(v, MethodType): - ll_ptrtype = lltype.typeOf(v.im_self) - assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) - return SomeLLADTMeth(ll_ptrtype, v.im_func) - return getbookkeeper().immutablevalue(v) - getattr.can_only_throw = [] - - def len(self): - from rpython.annotator.bookkeeper import getbookkeeper - length = self.ll_ptrtype._example()._fixedlength() - if length is None: - return SomeObject.len(self) - else: - return getbookkeeper().immutablevalue(length) - - def setattr(self, s_attr, s_value): # just doing checking - if not s_attr.is_constant(): - raise AnnotatorError("setattr on ptr %r with non-constant " - "field-name" % self.ll_ptrtype) - example = self.ll_ptrtype._example() - if getattr(example, s_attr.const) is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - setattr(example, s_attr.const, v_lltype._defl()) - - def call(self, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level fn ptr") - info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] - v = self.ll_ptrtype._example()(*llargs) - return ll_to_annotation(v) - - def bool(self): - result = SomeBool() - if self.is_constant(): - result.const = bool(self.const) - return result - - class SomeInteriorPtr(SomePtr): def __init__(self, ll_ptrtype): assert isinstance(ll_ptrtype, lltype.InteriorPtr) diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -5,8 +5,11 @@ # sizeof, offsetof import weakref +from rpython.annotator.bookkeeper import analyzer_for +from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool from 
rpython.rlib.objectmodel import Symbolic, specialize from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lltype import SomePtr from rpython.tool.uid import uid from rpython.rlib.rarithmetic import is_valid_int from rpython.rtyper.extregistry import ExtRegistryEntry @@ -401,6 +404,11 @@ assert fldname in TYPE._flds return FieldOffset(TYPE, fldname) + at analyzer_for(offsetof) +def ann_offsetof(TYPE, fldname): + return SomeInteger() + + @specialize.memo() def itemoffsetof(TYPE, n=0): result = ArrayItemsOffset(TYPE) @@ -537,6 +545,37 @@ from rpython.rtyper.llannotation import SomeAddress return SomeAddress() +class SomeAddress(SomeObject): + immutable = True + + def can_be_none(self): + return False + + def is_null_address(self): + return self.is_immutable_constant() and not self.const + + def getattr(self, s_attr): + assert s_attr.is_constant() + assert isinstance(s_attr, SomeString) + assert s_attr.const in supported_access_types + return SomeTypedAddressAccess(supported_access_types[s_attr.const]) + getattr.can_only_throw = [] + + def bool(self): + return s_Bool + +class SomeTypedAddressAccess(SomeObject): + """This class is used to annotate the intermediate value that + appears in expressions of the form: + addr.signed[offset] and addr.signed[offset] = value + """ + + def __init__(self, type): + self.type = type + + def can_be_none(self): + return False + # ____________________________________________________________ class AddressAsInt(Symbolic): @@ -683,9 +722,22 @@ assert isinstance(lltype.typeOf(obj), lltype.Ptr) return obj._cast_to_adr() + at analyzer_for(cast_ptr_to_adr) +def ann_cast_ptr_to_adr(s): + from rpython.rtyper.llannotation import SomeInteriorPtr + assert not isinstance(s, SomeInteriorPtr) + return SomeAddress() + + def cast_adr_to_ptr(adr, EXPECTED_TYPE): return adr._cast_to_ptr(EXPECTED_TYPE) + at analyzer_for(cast_adr_to_ptr) +def ann_cast_adr_to_ptr(s, s_type): + assert s_type.is_constant() + return 
SomePtr(s_type.const) + + def cast_adr_to_int(adr, mode="emulated"): # The following modes are supported before translation (after # translation, it's all just a cast): @@ -702,6 +754,11 @@ res = cast(lltype.Signed, res) return res + at analyzer_for(cast_adr_to_int) +def ann_cast_adr_to_int(s, s_mode=None): + return SomeInteger() # xxx + + _NONGCREF = lltype.Ptr(lltype.OpaqueType('NONGCREF')) def cast_int_to_adr(int): if isinstance(int, AddressAsInt): @@ -713,6 +770,10 @@ ptr = ll2ctypes._int2obj[int]._as_ptr() return cast_ptr_to_adr(ptr) + at analyzer_for(cast_int_to_adr) +def ann_cast_int_to_adr(s): + return SomeAddress() + # ____________________________________________________________ # Weakrefs. # @@ -722,6 +783,7 @@ class _WeakRefType(lltype.ContainerType): _gckind = 'gc' + def __str__(self): return "WeakRef" @@ -736,6 +798,15 @@ assert ptarget return _wref(ptarget)._as_ptr() + at analyzer_for(weakref_create) +def ann_weakref_create(s_obj): + if (not isinstance(s_obj, SomePtr) or + s_obj.ll_ptrtype.TO._gckind != 'gc'): + raise Exception("bad type for argument to weakref_create(): %r" % ( + s_obj,)) + return SomePtr(WeakRefPtr) + + def weakref_deref(PTRTYPE, pwref): # pwref should not be a nullptr assert isinstance(PTRTYPE, lltype.Ptr) @@ -747,6 +818,20 @@ else: return cast_any_ptr(PTRTYPE, p) + at analyzer_for(weakref_deref) +def ann_weakref_deref(s_ptrtype, s_wref): + if not (s_ptrtype.is_constant() and + isinstance(s_ptrtype.const, lltype.Ptr) and + s_ptrtype.const.TO._gckind == 'gc'): + raise Exception("weakref_deref() arg 1 must be a constant " + "ptr type, got %s" % (s_ptrtype,)) + if not (isinstance(s_wref, SomePtr) and + s_wref.ll_ptrtype == WeakRefPtr): + raise Exception("weakref_deref() arg 2 must be a WeakRefPtr, " + "got %s" % (s_wref,)) + return SomePtr(s_ptrtype.const) + + class _wref(lltype._container): _gckind = 'gc' _TYPE = WeakRef @@ -789,6 +874,12 @@ else: return lltype.nullptr(WeakRef) + at analyzer_for(cast_ptr_to_weakrefptr) +def 
llcast_ptr_to_weakrefptr(s_ptr): + assert isinstance(s_ptr, SomePtr) + return SomePtr(WeakRefPtr) + + def cast_weakrefptr_to_ptr(PTRTYPE, pwref): assert lltype.typeOf(pwref) == WeakRefPtr if pwref: @@ -799,6 +890,18 @@ else: return lltype.nullptr(PTRTYPE.TO) + at analyzer_for(cast_weakrefptr_to_ptr) +def llcast_weakrefptr_to_ptr(s_ptrtype, s_wref): + if not (s_ptrtype.is_constant() and + isinstance(s_ptrtype.const, lltype.Ptr)): + raise Exception("cast_weakrefptr_to_ptr() arg 1 must be a constant " + "ptr type, got %s" % (s_ptrtype,)) + if not (isinstance(s_wref, SomePtr) and s_wref.ll_ptrtype == WeakRefPtr): + raise Exception("cast_weakrefptr_to_ptr() arg 2 must be a WeakRefPtr, " + "got %s" % (s_wref,)) + return SomePtr(s_ptrtype.const) + + class _gctransformed_wref(lltype._container): _gckind = 'gc' _TYPE = WeakRef @@ -820,6 +923,12 @@ raise NotImplementedError(size) return size._raw_malloc([], zero=False) + at analyzer_for(raw_malloc) +def ann_raw_malloc(s_size): + assert isinstance(s_size, SomeInteger) # XXX add noneg...? + return SomeAddress() + + def raw_free(adr): # try to free the whole object if 'adr' is the address of the header from rpython.memory.gcheader import GCHeaderBuilder @@ -832,6 +941,10 @@ assert isinstance(adr.ref()._obj, lltype._parentable) adr.ptr._as_obj()._free() + at analyzer_for(raw_free) +def ann_raw_free(s_addr): + assert isinstance(s_addr, SomeAddress) + def raw_malloc_usage(size): if isinstance(size, AddressOffset): # ouah @@ -839,6 +952,12 @@ size = convert_offset_to_int(size) return size + at analyzer_for(raw_malloc_usage) +def ann_raw_malloc_usage(s_size): + assert isinstance(s_size, SomeInteger) # XXX add noneg...? 
+ return SomeInteger(nonneg=True) + + def raw_memclear(adr, size): if not isinstance(size, AddressOffset): raise NotImplementedError(size) @@ -846,17 +965,43 @@ zeroadr = size._raw_malloc([], zero=True) size.raw_memcopy(zeroadr, adr) + at analyzer_for(raw_memclear) +def ann_raw_memclear(s_addr, s_int): + assert isinstance(s_addr, SomeAddress) + assert isinstance(s_int, SomeInteger) + + def raw_memcopy(source, dest, size): assert lltype.typeOf(source) == Address - assert lltype.typeOf(dest) == Address + assert lltype.typeOf(dest) == Address size.raw_memcopy(source, dest) + at analyzer_for(raw_memcopy) +def ann_raw_memcopy(s_addr1, s_addr2, s_int): + assert isinstance(s_addr1, SomeAddress) + assert isinstance(s_addr2, SomeAddress) + assert isinstance(s_int, SomeInteger) # XXX add noneg...? + + def raw_memmove(source, dest, size): # for now let's assume that raw_memmove is the same as raw_memcopy, # when run on top of fake addresses, but we _free the source object raw_memcopy(source, dest, size) source.ptr._as_obj()._free() +class RawMemmoveEntry(ExtRegistryEntry): + _about_ = raw_memmove + + def compute_result_annotation(self, s_from, s_to, s_size): + assert isinstance(s_from, SomeAddress) + assert isinstance(s_to, SomeAddress) + assert isinstance(s_size, SomeInteger) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_list = hop.inputargs(Address, Address, lltype.Signed) + return hop.genop('raw_memmove', v_list) + def cast_any_ptr(EXPECTED_TYPE, ptr): # this is a generalization of the various cast_xxx_ptr() functions. 
PTRTYPE = lltype.typeOf(ptr) @@ -868,7 +1013,7 @@ ptr = cast_weakrefptr_to_ptr(None, ptr) return cast_any_ptr(EXPECTED_TYPE, ptr) elif (isinstance(EXPECTED_TYPE.TO, lltype.OpaqueType) or - isinstance(PTRTYPE.TO, lltype.OpaqueType)): + isinstance(PTRTYPE.TO, lltype.OpaqueType)): return lltype.cast_opaque_ptr(EXPECTED_TYPE, ptr) else: # regular case @@ -906,19 +1051,3 @@ setattr(dest._obj, name, llvalue) else: raise TypeError(T) - - -class RawMemmoveEntry(ExtRegistryEntry): - _about_ = raw_memmove - - def compute_result_annotation(self, s_from, s_to, s_size): - from rpython.annotator.model import SomeInteger - from rpython.rtyper.llannotation import SomeAddress - assert isinstance(s_from, SomeAddress) - assert isinstance(s_to, SomeAddress) - assert isinstance(s_size, SomeInteger) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - v_list = hop.inputargs(Address, Address, lltype.Signed) - return hop.genop('raw_memmove', v_list) diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1,12 +1,15 @@ -from rpython.rlib.rarithmetic import (r_int, r_uint, intmask, r_singlefloat, - r_ulonglong, r_longlong, r_longfloat, r_longlonglong, - base_int, normalizedinttype, longlongmask, longlonglongmask) +from types import NoneType, MethodType +import weakref +from rpython.annotator.model import ( + SomeInteger, SomeBool, SomeObject, AnnotatorError) +from rpython.rlib.rarithmetic import ( + r_int, r_uint, intmask, r_singlefloat, r_ulonglong, r_longlong, + r_longfloat, r_longlonglong, base_int, normalizedinttype, longlongmask, + longlonglongmask, maxint, is_valid_int, is_emulated_long) from rpython.rlib.objectmodel import Symbolic from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder -from types import NoneType -from rpython.rlib.rarithmetic import maxint, is_valid_int, is_emulated_long -import weakref +from 
rpython.annotator.bookkeeper import analyzer_for, immutablevalue from rpython.rtyper.extregistry import ExtRegistryEntry class State(object): @@ -768,6 +771,12 @@ hints={'interior_ptr_type':True}) return R + at analyzer_for(Ptr) +def constPtr(T): + assert T.is_constant() + return immutablevalue(Ptr(T.const)) + + class InteriorPtr(LowLevelType): def __init__(self, PARENTTYPE, TO, offsets): self.PARENTTYPE = PARENTTYPE @@ -826,6 +835,13 @@ # in an illegal way! raise TypeError("typeOf(%r object)" % (tp.__name__,)) + at analyzer_for(typeOf) +def ann_typeOf(s_val): + from rpython.rtyper.llannotation import annotation_to_lltype + lltype = annotation_to_lltype(s_val, info="in typeOf(): ") + return immutablevalue(lltype) + + _to_primitive = { Char: chr, UniChar: unichr, @@ -858,6 +874,13 @@ return float(value) raise TypeError("unsupported cast") + at analyzer_for(cast_primitive) +def ann_cast_primitive(T, s_v): + from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation + assert T.is_constant() + return ll_to_annotation(cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) + + def _cast_whatever(TGT, value): from rpython.rtyper.lltypesystem import llmemory, rffi ORIG = typeOf(value) @@ -930,12 +953,21 @@ raise InvalidCast(CURTYPE, PTRTYPE) return -u + def cast_pointer(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): raise TypeError("can only cast pointers to other pointers") return ptr._cast_to(PTRTYPE) + at analyzer_for(cast_pointer) +def ann_cast_pointer(PtrT, s_p): + assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p + assert PtrT.is_constant() + cast_p = cast_pointer(PtrT.const, s_p.ll_ptrtype._defl()) + return SomePtr(ll_ptrtype=typeOf(cast_p)) + + def cast_opaque_ptr(PTRTYPE, ptr): CURTYPE = typeOf(ptr) if not isinstance(CURTYPE, Ptr) or not isinstance(PTRTYPE, Ptr): @@ -982,6 +1014,14 @@ raise TypeError("invalid cast_opaque_ptr(): %r -> %r" % (CURTYPE, PTRTYPE)) + at 
analyzer_for(cast_opaque_ptr) +def ann_cast_opaque_ptr(PtrT, s_p): + assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p + assert PtrT.is_constant() + cast_p = cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl()) + return SomePtr(ll_ptrtype=typeOf(cast_p)) + + def direct_fieldptr(structptr, fieldname): """Get a pointer to a field in the struct. The resulting pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)). @@ -997,6 +1037,15 @@ raise RuntimeError("direct_fieldptr: NULL argument") return _subarray._makeptr(structptr._obj, fieldname, structptr._solid) + at analyzer_for(direct_fieldptr) +def ann_direct_fieldptr(s_p, s_fieldname): + assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p + assert s_fieldname.is_constant() + cast_p = direct_fieldptr(s_p.ll_ptrtype._example(), + s_fieldname.const) + return SomePtr(ll_ptrtype=typeOf(cast_p)) + + def direct_arrayitems(arrayptr): """Get a pointer to the first item of the array. The resulting pointer is actually of type Ptr(FixedSizeArray(ITEM, 1)) but can @@ -1010,6 +1059,13 @@ raise RuntimeError("direct_arrayitems: NULL argument") return _subarray._makeptr(arrayptr._obj, 0, arrayptr._solid) + at analyzer_for(direct_arrayitems) +def ann_direct_arrayitems(s_p): + assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p + cast_p = direct_arrayitems(s_p.ll_ptrtype._example()) + return SomePtr(ll_ptrtype=typeOf(cast_p)) + + def direct_ptradd(ptr, n): """Shift a pointer forward or backward by n items. 
The pointer must have been built by direct_arrayitems(), or it must be directly a @@ -1024,6 +1080,13 @@ parent, base = parentlink(ptr._obj) return _subarray._makeptr(parent, base + n, ptr._solid) + at analyzer_for(direct_ptradd) +def ann_direct_ptradd(s_p, s_n): + assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p + # don't bother with an example here: the resulting pointer is the same + return s_p + + def parentlink(container): parent = container._parentstructure() if parent is not None: @@ -1415,6 +1478,69 @@ from rpython.rtyper.llannotation import SomePtr return SomePtr(typeOf(self.instance)) +class SomePtr(SomeObject): + knowntype = _ptr + immutable = True + + def __init__(self, ll_ptrtype): + assert isinstance(ll_ptrtype, Ptr) + self.ll_ptrtype = ll_ptrtype + + def can_be_none(self): + return False + + def getattr(self, s_attr): + from rpython.rtyper.llannotation import SomeLLADTMeth, ll_to_annotation + if not s_attr.is_constant(): + raise AnnotatorError("getattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + try: + v = example._lookup_adtmeth(s_attr.const) + except AttributeError: + v = getattr(example, s_attr.const) + return ll_to_annotation(v) + else: + if isinstance(v, MethodType): + ll_ptrtype = typeOf(v.im_self) + assert isinstance(ll_ptrtype, (Ptr, InteriorPtr)) + return SomeLLADTMeth(ll_ptrtype, v.im_func) + return immutablevalue(v) + getattr.can_only_throw = [] + + def len(self): + length = self.ll_ptrtype._example()._fixedlength() + if length is None: + return SomeObject.len(self) + else: + return immutablevalue(length) + + def setattr(self, s_attr, s_value): # just doing checking + from rpython.rtyper.llannotation import annotation_to_lltype + if not s_attr.is_constant(): + raise AnnotatorError("setattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + if getattr(example, s_attr.const) is not None: # ignore Void s_value 
+ v_lltype = annotation_to_lltype(s_value) + setattr(example, s_attr.const, v_lltype._defl()) + + def call(self, args): + from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation + args_s, kwds_s = args.unpack() + if kwds_s: + raise Exception("keyword arguments to call to a low-level fn ptr") + info = 'argument to ll function pointer call' + llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] + v = self.ll_ptrtype._example()(*llargs) + return ll_to_annotation(v) + + def bool(self): + result = SomeBool() + if self.is_constant(): + result.const = bool(self.const) + return result + class _interior_ptr(_abstract_ptr): __slots__ = ('_parent', '_offsets') @@ -1994,6 +2120,32 @@ solid = immortal or flavor == 'raw' return _ptr(Ptr(T), o, solid) + at analyzer_for(malloc) +def ann_malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, + s_add_memory_pressure=None): + assert (s_n is None or s_n.knowntype == int + or issubclass(s_n.knowntype, base_int)) + assert s_T.is_constant() + if s_n is not None: + n = 1 + else: + n = None + if s_zero: + assert s_zero.is_constant() + if s_flavor is None: + p = malloc(s_T.const, n) + r = SomePtr(typeOf(p)) + else: + assert s_flavor.is_constant() + assert s_track_allocation is None or s_track_allocation.is_constant() + assert (s_add_memory_pressure is None or + s_add_memory_pressure.is_constant()) + # not sure how to call malloc() for the example 'p' in the + # presence of s_extraargs + r = SomePtr(Ptr(s_T.const)) + return r + + def free(p, flavor, track_allocation=True): if flavor.startswith('gc'): raise TypeError("gc flavor free") @@ -2004,6 +2156,17 @@ leakfinder.remember_free(p._obj0) p._obj0._free() + at analyzer_for(free) +def ann_free(s_p, s_flavor, s_track_allocation=None): + assert s_flavor.is_constant() + assert s_track_allocation is None or s_track_allocation.is_constant() + # same problem as in malloc(): some flavors are not easy to + # malloc-by-example + #T = 
s_p.ll_ptrtype.TO + #p = malloc(T, flavor=s_flavor.const) + #free(p, flavor=s_flavor.const) + + def render_immortal(p, track_allocation=True): T = typeOf(p) if not isinstance(T, Ptr) or p._togckind() != 'raw': @@ -2011,6 +2174,10 @@ if track_allocation: leakfinder.remember_free(p._obj0) + at analyzer_for(render_immortal) +def ann_render_immortal(s_p, s_track_allocation=None): + assert s_track_allocation is None or s_track_allocation.is_constant() + def _make_scoped_allocator(T): class ScopedAlloc: def __init__(self, n=None, zero=False): @@ -2050,9 +2217,17 @@ o = _func(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o) + def nullptr(T): return Ptr(T)._defl() + at analyzer_for(nullptr) +def ann_nullptr(T): + assert T.is_constant() + p = nullptr(T.const) + return immutablevalue(p) + + def opaqueptr(TYPE, name, **attrs): if not isinstance(TYPE, OpaqueType): raise TypeError("opaqueptr() for OpaqueTypes only") @@ -2063,6 +2238,11 @@ def cast_ptr_to_int(ptr): return ptr._cast_to_int() + at analyzer_for(cast_ptr_to_int) +def ann_cast_ptr_to_int(s_ptr): # xxx + return SomeInteger() + + def cast_int_to_ptr(PTRTYPE, oddint): if oddint == 0: return nullptr(PTRTYPE.TO) @@ -2070,6 +2250,12 @@ raise ValueError("only odd integers can be cast back to ptr") return _ptr(PTRTYPE, oddint, solid=True) + at analyzer_for(cast_int_to_ptr) +def ann_cast_int_to_ptr(PtrT, s_int): + assert PtrT.is_constant() + return SomePtr(ll_ptrtype=PtrT.const) + + def attachRuntimeTypeInfo(GCSTRUCT, funcptr=None, destrptr=None, customtraceptr=None): if not isinstance(GCSTRUCT, RttiStruct): @@ -2086,6 +2272,12 @@ GCSTRUCT._name) return _ptr(Ptr(RuntimeTypeInfo), GCSTRUCT._runtime_type_info) + at analyzer_for(getRuntimeTypeInfo) +def ann_getRuntimeTypeInfo(T): + assert T.is_constant() + return immutablevalue(getRuntimeTypeInfo(T.const)) + + def runtime_type_info(p): T = typeOf(p) if not isinstance(T, Ptr) or not isinstance(T.TO, RttiStruct): @@ -2104,6 +2296,12 @@ "should have been: %s" % (p, result2, 
result)) return result + at analyzer_for(runtime_type_info) +def ann_runtime_type_info(s_p): + assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p + return SomePtr(typeOf(runtime_type_info(s_p.ll_ptrtype._example()))) + + def identityhash(p): """Returns the lltype-level hash of the given GcStruct. Not for NULL. See rlib.objectmodel.compute_identity_hash() for more @@ -2112,6 +2310,12 @@ assert p return p._identityhash() + at analyzer_for(identityhash) +def ann_identityhash(s_obj): + assert isinstance(s_obj, SomePtr) + return SomeInteger() + + def identityhash_nocache(p): """Version of identityhash() to use from backends that don't care about caching.""" From noreply at buildbot.pypy.org Thu May 15 18:25:05 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Thu, 15 May 2014 18:25:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Broken Verion. Adds basic multi-threading through the rthred module. RThread module added, bootstrapper added to pass arguments to the thread, added ProcessWrapper to wrap squeak process objects and added stm_fork primitive Message-ID: <20140515162505.DD4781C02F3@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r832:39243de49282 Date: 2014-05-15 18:18 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/39243de49282/ Log: Broken Verion. Adds basic multi-threading through the rthred module. RThread module added, bootstrapper added to pass arguments to the thread, added ProcessWrapper to wrap squeak process objects and added stm_fork primitive diff too long, truncating to 2000 out of 306526 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 
3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! 
!LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorithm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." 
| mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! 
asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anything answer the global, class, or pool association with that name, if any, else add it to Undeclared and answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. 
^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notifying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nil->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. 
"Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! 
bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! 
Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. 
cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock "Answer the association whose key is varName, or the value of aBlock when there is none." ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. 
[repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! 
!CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. 
^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream "Remember the source; wrap plain text in a ReadStream so it can be scanned." sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! 
!ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a dictionary definition. Make sure the current locale corresponds to my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a dictionary definition. Make sure the current locale corresponds to my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. 
For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared "Answer the dictionary of undeclared globals." ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). 
TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). 
If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! 
!Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName "Resolve varName in the receiver's declared environment." ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. 
newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! 
rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. 
(self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. 
newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. 
^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! 
superclass: aClass
	weakSubclass: t instanceVariableNames: f
	classVariableNames: d poolDictionaries: s category: cat
	"This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables."
	| env |
	aClass isBits
		ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields'].
	"Let interested tools supply a target environment; default to the superclass's"
	env := EnvironmentRequest signal ifNil: [aClass environment].
	^self
		name: t
		inEnvironment: env
		subclassOf: aClass
		type: #weak
		instanceVariableNames: f
		classVariableNames: d
		poolDictionaries: s
		category: cat! !
"Kernel"!
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029!
ambiguousSelector: aString inRange: anInterval
	"Resolve an ambiguous use of $- (binary minus vs. negative literal) by asking the user, substituting the chosen reading into the source."
	| correctedSelector userSelection offset intervalWithOffset |
	self interactive ifFalse: [
		"In non interactive mode, compile with backward compatibility: $- is part of literal argument"
		Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'.
		^super ambiguousSelector: aString inRange: anInterval].
	"handle the text selection"
	userSelection := cue requestor selectionInterval.
	intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset.
	cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last.
	cue requestor select.
	"Build the menu with alternatives"
	correctedSelector := AmbiguousSelector
		signalName: aString
		inRange: intervalWithOffset.
	correctedSelector ifNil: [^self fail].
	"Execute the selected action"
	offset := self
		substituteWord: correctedSelector
		wordInterval: intervalWithOffset
		offset: 0.
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset.
	token := (correctedSelector readStream upTo: Character space) asSymbol! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136!
collectTemporaryDeclarationsFrom: methodNode
	"Answer the nodes (and possibly the receiver) that carry a valid temporary-declaration mark (a $| position inside the requestor's text), sorted by descending mark so later edits do not shift earlier marks."
	| tempsMarks str |
	tempsMarks := OrderedCollection new.
	str := cue requestor text asString.
	methodNode accept: (ParseNodeEnumerator
		ofBlock: [ :aNode |
			| mark |
			(aNode class canUnderstand: #tempsMark) ifTrue: [
				mark := aNode tempsMark.
				(mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ])
					ifTrue: [ tempsMarks addLast: aNode ]]]).
	"The parser itself tracks the method-level temps mark"
	(tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ])
		ifTrue: [ tempsMarks addLast: self ].
	^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606!
correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction
	"Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts."
	| correctSelector userSelection |
	"If we can't ask the user, assume that the keyword will be defined later"
	self interactive ifFalse: [^proposedKeyword asSymbol].
	userSelection := cue requestor selectionInterval.
	cue requestor selectFrom: spots first first to: spots last last.
	cue requestor select.
	correctSelector := UnknownSelector name: proposedKeyword.
	correctSelector ifNil: [^abortAction value].
	cue requestor deselect.
	cue requestor selectInvisiblyFrom: userSelection first to: userSelection last.
	self substituteSelector: correctSelector keywords wordIntervals: spots.
	"Abort when a keyword selector was replaced by a unary/binary one (trailing colon mismatch)"
	^(proposedKeyword last ~~ $: and: [correctSelector last == $:])
		ifTrue: [abortAction value]
		ifFalse: [correctSelector]! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242!
correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. 
undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." 
| descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! 
interactive
	"Answer true if compilation is interactive"
	^ cue requestor notNil! !
!Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223!
notify: string at: location
	"Report a syntax error: route it to the requestor when one exists, otherwise raise a SyntaxErrorNotification with the error marker spliced into the source text. Always answers by failing the parse."
	cue requestor isNil
		ifTrue: [
			(encoder == self or: [encoder isNil])
				ifTrue: [^ self fail "failure setting up syntax error"].
			SyntaxErrorNotification
				inClass: encoder classEncoding
				category: cue category
				withCode: (source contents asText
					copyReplaceFrom: location
					to: location - 1
					with: ((string , ' ->') asText allBold
						addAttribute: TextColor red;
						yourself))
				doitFlag: doitFlag
				errorMessage: string
				location: location]
		ifFalse: [cue requestor
			notify: string , ' ->'
			at: location
			in: source].
	^self fail! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108!
pasteTempAtMethodLevel: name
	"Insert the temporary 'name' into the method-level temp declaration in the requestor's text, creating a '| ... |' section when none exists; keep tempsMark in sync with the edit."
	| insertion delta theTextString characterBeforeMark |
	theTextString := cue requestor text string.
	characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ].
	(theTextString at: tempsMark) = $| ifTrue: [
		"Paste it before the second vertical bar"
		insertion := name, ' '.
		characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion].
		delta := 0.
	] ifFalse: [
		"No bars - insert some with CR, tab"
		insertion := '| ' , name , ' |',String cr.
		delta := 2. "the bar and CR"
		characterBeforeMark = Character tab ifTrue: [
			insertion := insertion , String tab.
			delta := delta + 1. "the tab"
		].
	].
	tempsMark := tempsMark +
		(self substituteWord: insertion
			wordInterval: (tempsMark to: tempsMark-1)
			offset: 0) - delta! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305!
queryUndefined
	"Highlight the undefined variable in the requestor and ask the user whether to proceed; fail the parse on refusal."
	| varStart varName |
	varName := parseNode key.
	varStart := self endOfLastToken + requestorOffset - varName size + 1.
	cue requestor selectFrom: varStart to: varStart + varName size - 1; select.
	(UndefinedVariable name: varName) ifFalse: [^ self fail]! !
!Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341!
removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. 
(end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! 
temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). 
If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." 
self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! 
encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. 
To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. 
newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." 
newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). 
"Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. 
ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! 
!Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu May 15 18:39:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 May 2014 18:39:52 +0200 (CEST) Subject: [pypy-commit] pypy default: split os.path.isdir and generic.isdir tests Message-ID: <20140515163952.1336B1C02D8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71533:88d226bccdd7 Date: 2014-05-15 19:38 +0300 http://bitbucket.org/pypy/pypy/changeset/88d226bccdd7/ Log: split os.path.isdir and generic.isdir tests diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -243,6 +243,20 @@ assert f() == False def test_os_path_isdir(): + if sys.platform != 'win32': + py.test.skip('use generic.isdir() instead') + directory = "./." + def fn(): + return os.path.isdir(directory) + f = compile(fn, []) + assert f() == True + directory = "some/random/name" + def fn(): + return os.path.isdir(directory) + f = compile(fn, []) + assert f() == False + +def test_generic_isdir(): # os.path.isdir is not rpython once pywin is installed (win32 specific) # genericpath.isdir is better. directory = "./." 
@@ -301,7 +315,7 @@ f1 = compile(does_stuff, [str]) if os.name == 'nt': assert f1(os.environ['TEMP']) == os.path.realpath(os.environ['TEMP']) - else: + else: assert f1('/tmp') == os.path.realpath('/tmp') def test_mkdir_rmdir(): From noreply at buildbot.pypy.org Thu May 15 20:37:40 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 May 2014 20:37:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't ever call SomeObject() Message-ID: <20140515183740.A0D511C3306@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71534:ad72a92bf0ac Date: 2014-05-15 19:36 +0100 http://bitbucket.org/pypy/pypy/changeset/ad72a92bf0ac/ Log: Don't ever call SomeObject() diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -834,5 +834,5 @@ else: basedef = s_wrf1.classdef.commonbase(s_wrf2.classdef) if basedef is None: # no common base class! complain... - return SomeObject() + raise UnionError(s_wrf1, s_wrf2) return SomeWeakRef(basedef) diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -211,7 +211,7 @@ def builtin_tuple(s_iterable): if isinstance(s_iterable, SomeTuple): return s_iterable - return SomeObject() + return AnnotatorError("tuple(): argument must be another tuple") def builtin_list(s_iterable): if isinstance(s_iterable, SomeList): diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -117,7 +117,7 @@ class __extend__(pairtype(SomeWeakValueDict, SomeWeakValueDict)): def union((s_wvd1, s_wvd2)): if s_wvd1.valueclassdef is not s_wvd2.valueclassdef: - return annmodel.SomeObject() # not the same class! complain... 
+ raise UnionError(s_wvd1, s_wvd2, "not the same class!") s_key = annmodel.unionof(s_wvd1.s_key, s_wvd2.s_key) return SomeWeakValueDict(s_key, s_wvd1.valueclassdef) @@ -182,9 +182,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - return SomeObject() # not the same key class! complain... + raise UnionError(w_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - return SomeObject() # not the same value class! complain... + raise UnionError(w_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): From noreply at buildbot.pypy.org Thu May 15 21:37:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 May 2014 21:37:26 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup os.path.isdir by registering a specialcase for windows (ronan) Message-ID: <20140515193726.5D20D1C0190@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71535:523cda68ea97 Date: 2014-05-15 22:37 +0300 http://bitbucket.org/pypy/pypy/changeset/523cda68ea97/ Log: cleanup os.path.isdir by registering a specialcase for windows (ronan) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. 
""" -import sys, os, stat, genericpath +import sys, os, stat from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -522,8 +522,7 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - # os.path.isdir on win32 is not rpython when pywin32 installed - if genericpath.isdir(filepart) and case_ok(filepart): + if os.path.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -65,6 +65,13 @@ # (on CPython they are '==', but not identical either) return ctx.appcall(os.unlink, *args_w) +if os.name == 'nt': + @register_flow_sc(os.path.isdir) + def sc_os_path_isdir(ctx, *args_w): + # Cpython win32 reroutes os.path.isdir to nt._isdir + # which is not rpython + import genericpath + return ctx.appcall(genericpath.isdir, *args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -1,5 +1,5 @@ import py -import os, time, sys, genericpath +import os, time, sys from rpython.tool.udir import udir from rpython.rlib.rarithmetic import r_longlong from rpython.annotator import model as annmodel @@ -243,8 +243,6 @@ assert f() == False def test_os_path_isdir(): - if sys.platform != 'win32': - py.test.skip('use generic.isdir() instead') directory = "./." 
def fn(): return os.path.isdir(directory) @@ -256,20 +254,6 @@ f = compile(fn, []) assert f() == False -def test_generic_isdir(): - # os.path.isdir is not rpython once pywin is installed (win32 specific) - # genericpath.isdir is better. - directory = "./." - def fn(): - return genericpath.isdir(directory) - f = compile(fn, []) - assert f() == True - directory = "some/random/name" - def fn(): - return genericpath.isdir(directory) - f = compile(fn, []) - assert f() == False - def test_time_time(): import time def fn(): From noreply at buildbot.pypy.org Thu May 15 22:51:06 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 May 2014 22:51:06 +0200 (CEST) Subject: [pypy-commit] pypy default: kill unused noreturnvalue() Message-ID: <20140515205106.0A7CC1C02D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71536:5d771f55cfa8 Date: 2014-05-15 21:50 +0100 http://bitbucket.org/pypy/pypy/changeset/5d771f55cfa8/ Log: kill unused noreturnvalue() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -598,16 +598,13 @@ e.source = gather_error(self, graph, block, opindex) raise if resultcell is None: - resultcell = self.noreturnvalue(op) + resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: raise BlockedInference(self, op, opindex) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result - def noreturnvalue(self, op): - return annmodel.s_ImpossibleValue # no return value (hook method) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation From noreply at buildbot.pypy.org Fri May 16 05:15:29 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 May 2014 05:15:29 +0200 (CEST) Subject: [pypy-commit] pypy default: catch 
AnnotatorError one level higher Message-ID: <20140516031529.D18FF1C0190@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71537:16300e5795a1 Date: 2014-05-16 04:14 +0100 http://bitbucket.org/pypy/pypy/changeset/16300e5795a1/ Log: catch AnnotatorError one level higher diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -433,6 +433,10 @@ except annmodel.HarmlesslyBlocked: return + except annmodel.AnnotatorError as e: # note that UnionError is a subclass + e.source = gather_error(self, graph, block, i) + raise + else: # dead code removal: don't follow all exits if the exitswitch # is known @@ -580,23 +584,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - try: - argcells = [self.binding(a) for a in op.args] + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - resultcell = op.consider(self, *argcells) - except annmodel.AnnotatorError as e: # note that UnionError is a subclass - graph = self.bookkeeper.position_key[0] - e.source = gather_error(self, graph, block, opindex) - raise + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
+ # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) + resultcell = op.consider(self, *argcells) if resultcell is None: resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: From noreply at buildbot.pypy.org Fri May 16 09:08:44 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 May 2014 09:08:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add piratepad discussion Message-ID: <20140516070844.4A2E21D23D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5253:2789596150a0 Date: 2014-05-16 09:09 +0200 http://bitbucket.org/pypy/extradoc/changeset/2789596150a0/ Log: add piratepad discussion diff --git a/talk/dls2014/paper/discussion.txt b/talk/dls2014/paper/discussion.txt new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/discussion.txt @@ -0,0 +1,87 @@ +Claims we want to make: +- STM replaces GIL with overhead < 50% on single thread +- STM on 2-4 threads is faster than GIL, on some benchmarks +- (adding threads makes things at most ?x times slower) +- It works for real programs +- maybe: combination of GC & STM is essential? + + +Title suggestions: +Memory system assisted STM for dynamic language VMs? +Virtual memory assisted STM for dynamic language VMs? + + +What should be in? + +Alternative 1: +- STM-C7 library explanation (technical details) +- Some explanation of how STM replaces the GIL +- why STM-GC combination is good +- Mention the JIT, but don't describe integration +- Evaluate against Jython, CPython, etc. 
with JIT (maybe add duhton) +- Overhead breakdown + +Alternative 2: +- STM-C7 as a C library (tech details) +- Some explanation of how STM replaces the GIL +- why STM-GC combination is good +- use in Duhton & PyPy +- Evaluate duhton & pypy against their GIL versions, maybe also include CPython (we lose against Jython) +- Overhead breakdown +(then next paper would be integration with RPython & JIT and evaluation against Jython, CPython?) +(maybe people don't care enough if we just compare against our own stuff) + +Should we talk about atomic blocks much? + + +Abstract +======== + +We introduce a new STM that focuses on dynamic languages in order to replace the GIL, thereby enabling parallelism. Closely working together with the hardware and integrating with the GC allows us to lower the overhead of STM to levels where it makes sense to use it even on low numbers of CPUs. We show how our approach compares to the GIL and to fine-grained locking as used in Jython. +Furthermore, we introduce atomic blocks to Python as a better synchronization mechanism for many areas where one normally uses locks. 
+ + +Introduction +============ + +Problem Statement +~~~~~~~~~~~~~~~~~ + +Contributions: +- Viable GIL replacement +- New STM system for dynamic langs +- STM-GC integration (overflow objects) +- Well performing STM system for low #CPUs + +Why STM and GC integration is a good idea +========================================= + + + +Background +========== + + +Method +====== + +Model +~~~~~ + +Implementation +~~~~~~~~~~~~~~ + +Detailed Implementation +~~~~~~~~~~~~~~~~~~~~~~~ + + +Evaluation +========== + + +Related Work +============ + + +Conclusions +=========== From noreply at buildbot.pypy.org Fri May 16 10:24:48 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 16 May 2014 10:24:48 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: adjust some details Message-ID: <20140516082448.3D9501D2BBA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5254:9d6ae8b1bf95 Date: 2014-05-16 10:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/9d6ae8b1bf95/ Log: adjust some details diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -205,7 +205,7 @@ semantics as using the GIL while still allowing the TM system to run transactions in parallel as an optimisation. - +\remi{maybe some more explanation of how exactly TM replaces the GIL} \subsection{Python} @@ -230,8 +230,8 @@ The second approach, \emph{multiprocessing}, uses multiple instances of the interpreter itself and runs them in separate OS processes. Here we actually get parallelism because we have one GIL per -interpreter, but of course we have the overhead of multiple processes -/ interpreters and also need to exchange data between them explicitly +interpreter, but of course we have the overhead of multiple processes~/ +interpreters and also need to exchange data between them explicitly and expensively. We focus on the \emph{threading} approach. 
This requires us to remove @@ -351,7 +351,7 @@ an object's offset inside a segment is the same in all segments, we can use this offset to reference objects. Because all segments are copies of each other, this \emph{Segment Offset ($SO$)} points to the -private version of an object in all threads\,/\,segments. To then +private version of an object in all threads~/ segments. To then translate this $SO$ to a real virtual memory address when used inside a thread, we need to add the thread's segment start address to the $SO$. The result of this operation is called a \emph{Linear Address @@ -392,11 +392,13 @@ \ref{fig:mmap()-Page-Mapping}, \lstinline!mmap()! creates a mapping between a range of virtual memory pages and virtual file pages. The virtual file pages are then mapped lazily by the kernel to real -physical memory pages. The mapping generated by \lstinline!mmap()! is -initially linear but can be changed arbitrarily. Especially, we can -remap so that multiple virtual memory pages map to a single virtual -file page. This is what we use to share memory between the segments -since then we also only require one page of physical memory. +physical memory pages. + +The mapping generated by \lstinline!mmap()! is initially linear but +can be changed arbitrarily. Especially, we can remap so that multiple +virtual memory pages map to a single virtual file page. This is what +we use to share memory between the segments, since we then only +require one page of physical memory for all of them. \begin{figure}[h] \begin{centering} @@ -409,16 +411,19 @@ As illustrated in Figure \ref{fig:Page-Remapping}, in our initial configuration (I) all segments are backed by their own range of -virtual file pages. This is the share-nothing configuration. +virtual file pages. This is the share-nothing configuration where +all threads have private versions of all objects. -We then designate segment 0 to be the \emph{Sharing-Segment}. 
No +We then designate segment~0 to be the \emph{sharing-segment}. No thread gets this segment assigned to it, it simply holds the pages -shared between all threads. So in (II), we remap all virtual pages of -the segments $>0$ to the file pages of our sharing-segment. This is -the fully-shared configuration. +shared between threads. So in step (II), we remap all virtual pages of +the segments~$>0$ to the file pages of our sharing-segment. This is +the fully-shared configuration where no threads have private versions +of any objects. -During runtime, we can then privatise single pages in segments $>0$ -again by remapping single pages as seen in (III). +During runtime, we can then privatise single pages in segments~$>0$ +again by remapping single pages as seen in (III). All objects in that +page now have a private version in some thread. Looking back at address translation for object references, we see now that this is actually a two-step process. First, $\%gs{::}SO$ gets @@ -426,14 +431,15 @@ CPU. Then, depending on the current mapping of virtual pages to file pages, these LAs can map to a single file page in the sharing-segment, or to privatised file pages in the corresponding segments. This -mapping is also performed efficiently by the CPU and can easily be -done on every access to an object. +mapping is also performed efficiently by CPUs that have a Memory +Management Unit (MMU) and can easily be done on every access to an +object. In summary, $\%gs{::}SO$ is translated efficiently by the CPU to either a physical memory location which is shared between several -threads/segments, or to a location in memory private to the -segment/thread. This makes the memory segmentation model for -isolation memory efficient again. +threads~/ segments or to a location in memory private to the segment~/ +thread. Page sharing makes the memory segmentation model for isolation +memory efficient again. 
\begin{figure}[h] \begin{centering} @@ -441,7 +447,7 @@ \par\end{centering} \protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to - segment 0, fully shared memory configuration. (III) privatise single + segment~0, fully shared memory configuration. (III) privatise single pages.\label{fig:Page-Remapping}} \end{figure} @@ -457,12 +463,12 @@ that all pages belonging to the object are private to our segment. To detect when to privatise pages, we use write barriers before every -write. When the barrier detects that the object is not in a private -page (or any pages that belong to the object), we remap and copy the -pages to the thread's segment. From now on, the translation of -$\%gs{::}SO$ in this particular segment will resolve to the private -version of the object. Note, the $SO$ used to reference the object does -not change during that process. +write to an object. When the barrier detects that the object is not in +a private page (or any pages that belong to the object), we remap and +copy the pages to the thread's segment. From now on, the translation +of $\%gs{::}SO$ in this particular thread will resolve to a private +version of the object automatically. Note that the $SO$ used to reference +the object does not change during that process. @@ -470,13 +476,15 @@ The job of barriers is to ensure complete isolation between transactions and to register the objects in the read or write set. We insert read -and write barriers before reading or modifying an object except if +and write barriers before reading or modifying an object, except if we statically know an object to be readable or writable already. \begin{description} \item [{Read~Barrier:}] Adds the object to the read set of the current transaction. Since our two-step address translation automatically resolves the reference to the private version of the object on every - access anyway, the read barrier does not need to do address translation anymore. 
+ access anyway, the read barrier does not have the job to find the + private version. This job is fully performed by the CPU so that + read barriers have very little work to do. \item [{Write~Barrier:}] Adds the object to the read and write set of the current transaction and checks if all pages of the object are private, doing COW otherwise.\\ @@ -484,9 +492,10 @@ object at a time. To ensure this, we acquire a write lock on the object and also eagerly check for a write-write conflict at this point. If there is a conflict, we do some contention management to decide which - transaction has to wait or abort. Eagerly detecting this kind of conflict - is not inherent to our system, future experiments may show that we - want to lift this restriction. + transaction has to wait or abort.\\ + Eagerly detecting this kind of conflict is not inherent to our + system, future experiments may show that we want to lift this + restriction. \end{description} @@ -543,13 +552,16 @@ \subsubsection{Architecture} Our TM system is designed as a library that covers all aspects around -transactions and object management. The library consists of two parts: -(I) It provides a simple interface to starting and committing -transactions, as well as the required read and write barriers. (II) It -also includes a \emph{garbage collector (GC)} that is closely -integrated with the TM part (e.g. it shares the write barrier). The -close integration helps in order to know more about the lifetime of an -object, as will be explained in the following sections. +transactions and object management. It is designed for object-oriented +dynamic language VMs as a replacement for the GIL. + +The library consists of two parts: (I) It provides a simple interface +to starting and committing transactions, as well as the required read +and write barriers. (II) It also includes a \emph{garbage collector +(GC)} that is closely integrated with the TM part (e.g. it shares the +write barrier). 
The close integration helps in order to know more +about the lifetime of an object, as will be explained in the following +sections. \subsubsection{Application Programming Interface\label{sub:Application-Programming-Interfac}} @@ -571,8 +583,8 @@ \lstinline!stm_commit_transaction()! tries to commit the current transaction. \lstinline!stm_read()!, \lstinline!stm_write()! perform a read or a write barrier on an object and \lstinline!stm_allocate()! -allocates a new object with the specified size (must be a multiple of -16). \lstinline!STM_PUSH_ROOT()! and \lstinline!STM_POP_ROOT()! push +allocates a new object with the specified size. + \lstinline!STM_PUSH_ROOT()! and \lstinline!STM_POP_ROOT()! push and pop objects on the shadow stack~\footnote{A stack for pointers to GC objects that allows for precise garbage collection. All objects on that stack are never seen as garbage and are thus always kept @@ -583,8 +595,8 @@ require saving object references. The type \lstinline!object_t! is special as it causes the -compiler~\footnote{Clang 3.5 with some patches to this address-space - 256 feature} to make all accesses through it relative to the $\%gs$ +compiler~\footnote{Clang 3.5 with some patches to its address-space + 256 feature} to make all accesses through it relative to the $\%gs$ register. With exceptions, nearly all accesses to objects managed by the TM system should use this type so that the CPU will translate the reference to the right version of the object. @@ -594,7 +606,7 @@ On startup, we reserve a big range of virtual memory with a call to \lstinline!mmap()! and partition this space into $N+1$ segments. We -want to run $N$ threads in parallel while segment 0 is designated as +want to run $N$ threads in parallel while segment~0 is designated as the \emph{sharing-segment} that is never assigned to a thread. The next step involves using \lstinline!remap_file_pages()!, a Linux @@ -660,7 +672,8 @@ Garbage collection plays a big role in our TM system. 
The GC is generational and has two generations: the \emph{young} and the -\emph{old} generation. +\emph{old} generation. It is optimised for dynamic languages with +high allocation rates. The \textbf{young generation}, where objects are considered to be \emph{young} and reside in the \emph{Nursery}, is collected by @@ -679,7 +692,8 @@ the old object space with an \lstinline!overflow_number! globally unique to the current transaction. That way we can still detect in a medium-fast path inside barriers that the object still belongs to the -current transaction. +current transaction. \remi{so this is where we mention how the GC-STM +integration is useful. highlight more or move to own section?} The \textbf{old generation}, where objects are considered to be \emph{old} and never move again, is collected by \emph{major @@ -696,7 +710,8 @@ shadow stack using \lstinline!STM_PUSH_ROOT()!. That way, they will not be freed. And in case they were young, we get their new location in the old object space when getting them back from the stack using -\lstinline!STM_POP_ROOT()!. +\lstinline!STM_POP_ROOT()!. \remi{cite something which explains +shadowstacks in more detail} @@ -717,12 +732,12 @@ This area can be seen as a continuous array of bytes that is indexed from the start of the segment by an object's reference ($SO$) divided -by 16 (this is where the requirement of objects to be of at least 16 -bytes in size comes from). Instead of just setting the byte to -\lstinline!true! if the corresponding object was read, we set it to a -\lstinline!read_version! belonging to the transaction, which will be -incremented on each commit. Thereby, we can avoid resetting the bytes -to \lstinline!false! on commit and only need to do this every 255 +by 16 (this requires objects to have a size which is dividable by 16). +Instead of just setting the byte to \lstinline!true! if the +corresponding object was read, we set it to a \lstinline!read_version! 
+belonging to the transaction, which will be incremented on each +commit. Thereby, we can avoid resetting the bytes to +\lstinline!false! on commit and only need to do this every 255 transactions. The whole code for the barrier is easily optimisable for compilers as well as perfectly predictable for CPUs: @@ -736,8 +751,8 @@ \subsubsection{Write Barrier} The job of the write barrier is twofold: first, it serves as a write -barrier for the garbage collector and second, it supports -copy-on-write and adds objects to the write set of the transaction. +barrier for the garbage collector and second, it supports COW and adds +objects to the write set of the transaction. The \textbf{fast path} of the write barrier is very simple. We only need to check for the flag \lstinline!WRITE_BARRIER! in the object's @@ -789,29 +804,31 @@ reference to it that points to a young object. We then need to trace it during the next minor collection in order to mark the young object alive and to update its reference to the new location it gets moved -to. The check for \lstinline!is_overflow_obj()! tells us if the -object was actually created in this transaction. In that case, we do -not need to execute the following \emph{TM part}. We especially do -not need to privatise the page since no other transaction knows about -these ``old'' objects. +to. -For TM, we first perform a read barrier on the object. We then try to -acquire its write lock. \lstinline!write_locks! again is a simple -global array of bytes that is indexed with the $SO$ of the object -divided by 16. If we already own the lock, we are done. If someone -else owns the lock, we will do a write-write contention management -that will abort either us or the current owner of the object. If we -succeed in acquiring the lock using an atomic +The check for \lstinline!is_overflow_obj()! looks at the +\lstinline!overflow_number! and tells us if the object was actually +created in this transaction. 
In that case, we do not need to execute +the following \emph{TM part}. We especially do not need to privatise +its pages since no other transaction knows about these overflow +objects. Even if they reside in non-private pages, it is guaranteed +that no other transaction can have a reference to them. + +For the \emph{TM part}, we first perform a read barrier on the +object. We then try to acquire its write lock. \lstinline!write_locks! +again is a simple global array of bytes that is indexed with the $SO$ +of the object divided by 16. If we already own the lock, we are done. +If someone else owns the lock, we will do a write-write contention +management that will abort either us or the current owner of the +object. If we succeed in acquiring the lock using an atomic \lstinline!cmp_and_swap!, we need to add the object to the write set (a simple list called \lstinline!modified_old_objects!) and privatise all pages belonging to it (copy-on-write). In all cases, we remove the \lstinline!WRITE_BARRIER! flag from the object before we return. Thus, we never trigger the slow path again -before we do the next minor collection (also part of a commit) or we -start the next transaction. - - +before we do the next minor collection or we start the next +transaction (we always do a minor collection during a commit). \subsubsection{Abort} @@ -823,7 +840,7 @@ sharing-segment. What is left is to use \lstinline!longjmp()! to jump back to the location initialised by a \lstinline!setjmp()! in \lstinline!stm_start_transaction()!. Increasing the -\lstinline!read_version! is also done there. +\lstinline!read_version! for the next transaction is also done there. @@ -832,9 +849,9 @@ Committing a transaction needs a bit more work. First, we synchronise all threads so that the committing one is the only one running and all -the others are waiting in a safe point. We then go through the write +the others are waiting in safe points. 
We then go through the write set (\lstinline!modified_old_objects!) and check the corresponding -\lstinline!read_markers! in other threads/segments. If we detect a +\lstinline!read_markers! in other threads~/ segments. If we detect a read-write conflict, we do contention management to either abort us or the other transaction, or to simply wait a bit (see \ref{subsub:contentionmanagement}). @@ -844,7 +861,7 @@ threads. We also need to push overflow objects generated by minor collections to other segments, since they may reside partially in private pages. At that point we also get a new -\lstinline!overflow_number! by increasing a global one, so that it +\lstinline!overflow_number! by increasing a global one, so that it stays globally unique for each transaction. Increasing the \lstinline!read_version! is then done at the start of a new transaction. @@ -882,10 +899,10 @@ If we want to synchronise all threads, we can rely on this check being performed regularly. So what we do is to set the -\lstinline!nursery_end! to $0$ in all segments that we want to -synchronise. The mentioned check will then fail in those segments and -call the slow path. In \lstinline!allocate_slowpath! they can simply -check for this condition and enter a safe point. +\lstinline!nursery_end! to some small number in all segments that we +want to synchronise. The mentioned check will then fail in those +segments and call the slow path. In \lstinline!allocate_slowpath! +they can simply check for this condition and enter a safe point. 
For other synchronisation requirements, for example: \begin{itemize}[noitemsep] From noreply at buildbot.pypy.org Fri May 16 11:13:10 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 May 2014 11:13:10 +0200 (CEST) Subject: [pypy-commit] pypy default: convert VerificationError to ImportError Message-ID: <20140516091310.AE33F1D2499@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71538:e4355950858a Date: 2014-05-16 12:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e4355950858a/ Log: convert VerificationError to ImportError diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -36,9 +36,15 @@ void free(void*); ''') -lib = ffi.verify(''' -#include "gdbm.h" -''', libraries=['gdbm']) +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) class error(Exception): pass From noreply at buildbot.pypy.org Fri May 16 16:56:18 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 May 2014 16:56:18 +0200 (CEST) Subject: [pypy-commit] pypy default: pass just an operation object to consider_op() Message-ID: <20140516145618.34C571D29F4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71539:169815bdfefe Date: 2014-05-16 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/169815bdfefe/ Log: pass just an operation object to consider_op() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -397,16 +397,15 @@ return repr(graph) + blk + opid def flowin(self, graph, block): - #print 'Flowing', block, [self.binding(a) for a in block.inputargs] try: - for i in range(len(block.operations)): + for i, op in 
enumerate(block.operations): + self.bookkeeper.enter((graph, block, i)) try: - self.bookkeeper.enter((graph, block, i)) - self.consider_op(block, i) + self.consider_op(op) finally: self.bookkeeper.leave() - except BlockedInference, e: + except BlockedInference as e: if (e.op is block.operations[-1] and block.exitswitch == c_last_exception): # this is the case where the last operation of the block will @@ -428,6 +427,7 @@ # other cases are problematic (but will hopefully be solved # later by reflowing). Throw the BlockedInference up to # processblock(). + e.opindex = i raise except annmodel.HarmlesslyBlocked: @@ -582,8 +582,7 @@ #___ creating the annotations based on operations ______ - def consider_op(self, block, opindex): - op = block.operations[opindex] + def consider_op(self, op): argcells = [self.binding(a) for a in op.args] # let's be careful about avoiding propagated SomeImpossibleValues @@ -594,12 +593,12 @@ # boom -- in the assert of setbinding() for arg in argcells: if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) + raise BlockedInference(self, op, -1) resultcell = op.consider(self, *argcells) if resultcell is None: resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: - raise BlockedInference(self, op, opindex) # the operation cannot succeed + raise BlockedInference(self, op, -1) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result From noreply at buildbot.pypy.org Fri May 16 17:24:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 May 2014 17:24:44 +0200 (CEST) Subject: [pypy-commit] pypy default: split large method Message-ID: <20140516152444.A7BEC1D23F2@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71540:1aeb1067e23a Date: 2014-05-16 16:24 +0100 http://bitbucket.org/pypy/pypy/changeset/1aeb1067e23a/ Log: 
split large method diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -447,11 +447,6 @@ exits = [link for link in exits if link.exitcase == s_exitswitch.const] - # mapping (exitcase, variable) -> s_annotation - # that can be attached to booleans, exitswitches - knowntypedata = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", {}) - # filter out those exceptions which cannot # occour for this specific, typed operation. if block.exitswitch == c_last_exception: @@ -484,93 +479,12 @@ exits.append(link) candidates = [c for c in candidates if c not in covered] + # mapping (exitcase, variable) -> s_annotation + # that can be attached to booleans, exitswitches + knowntypedata = getattr(self.bindings.get(block.exitswitch), + "knowntypedata", {}) for link in exits: - in_except_block = False - - last_exception_var = link.last_exception # may be None for non-exception link - last_exc_value_var = link.last_exc_value # may be None for non-exception link - - if isinstance(link.exitcase, (types.ClassType, type)) \ - and issubclass(link.exitcase, py.builtin.BaseException): - assert last_exception_var and last_exc_value_var - last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - last_exception_object.const = last_exception_var.value - last_exception_object.is_type_of = [last_exc_value_var] - - if isinstance(last_exception_var, Variable): - self.setbinding(last_exception_var, last_exception_object) - if isinstance(last_exc_value_var, Variable): - self.setbinding(last_exc_value_var, last_exc_value_object) - - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - last_exception_object.const = last_exception_var.value - #if link.exitcase is Exception: - # last_exc_value_object = annmodel.SomeObject() - #else: - 
last_exc_value_vars = [] - in_except_block = True - - ignore_link = False - cells = [] - renaming = {} - for a,v in zip(link.args,link.target.inputargs): - renaming.setdefault(a, []).append(v) - for a,v in zip(link.args,link.target.inputargs): - if a == last_exception_var: - assert in_except_block - cells.append(last_exception_object) - elif a == last_exc_value_var: - assert in_except_block - cells.append(last_exc_value_object) - last_exc_value_vars.append(v) - else: - cell = self.binding(a) - if (link.exitcase, a) in knowntypedata: - knownvarvalue = knowntypedata[(link.exitcase, a)] - cell = pair(cell, knownvarvalue).improve() - # ignore links that try to pass impossible values - if cell == annmodel.s_ImpossibleValue: - ignore_link = True - - if hasattr(cell,'is_type_of'): - renamed_is_type_of = [] - for v in cell.is_type_of: - new_vs = renaming.get(v,[]) - renamed_is_type_of += new_vs - assert cell.knowntype is type - newcell = annmodel.SomeType() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.is_type_of = renamed_is_type_of - - if hasattr(cell, 'knowntypedata'): - renamed_knowntypedata = {} - for (value, v), s in cell.knowntypedata.items(): - new_vs = renaming.get(v, []) - for new_v in new_vs: - renamed_knowntypedata[value, new_v] = s - assert isinstance(cell, annmodel.SomeBool) - newcell = annmodel.SomeBool() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.set_knowntypedata(renamed_knowntypedata) - - cells.append(cell) - - if ignore_link: - continue - - if in_except_block: - last_exception_object.is_type_of = last_exc_value_vars - - self.links_followed[link] = True - self.addpendingblock(graph, link.target, cells) - + self.follow_link(graph, link, knowntypedata) if block in self.notify: # reflow from certain positions when this block is done for callback in self.notify[block]: @@ -579,6 +493,90 @@ else: callback() + def follow_link(self, graph, link, knowntypedata): + in_except_block = False + 
last_exception_var = link.last_exception # may be None for non-exception link + last_exc_value_var = link.last_exc_value # may be None for non-exception link + + if isinstance(link.exitcase, (types.ClassType, type)) \ + and issubclass(link.exitcase, py.builtin.BaseException): + assert last_exception_var and last_exc_value_var + last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + last_exception_object.is_type_of = [last_exc_value_var] + + if isinstance(last_exception_var, Variable): + self.setbinding(last_exception_var, last_exception_object) + if isinstance(last_exc_value_var, Variable): + self.setbinding(last_exc_value_var, last_exc_value_object) + + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + #if link.exitcase is Exception: + # last_exc_value_object = annmodel.SomeObject() + #else: + last_exc_value_vars = [] + in_except_block = True + + ignore_link = False + cells = [] + renaming = {} + for a, v in zip(link.args, link.target.inputargs): + renaming.setdefault(a, []).append(v) + for a, v in zip(link.args, link.target.inputargs): + if a == last_exception_var: + assert in_except_block + cells.append(last_exception_object) + elif a == last_exc_value_var: + assert in_except_block + cells.append(last_exc_value_object) + last_exc_value_vars.append(v) + else: + cell = self.binding(a) + if (link.exitcase, a) in knowntypedata: + knownvarvalue = knowntypedata[(link.exitcase, a)] + cell = pair(cell, knownvarvalue).improve() + # ignore links that try to pass impossible values + if cell == annmodel.s_ImpossibleValue: + ignore_link = True + + if hasattr(cell,'is_type_of'): + renamed_is_type_of = [] + for v in cell.is_type_of: + new_vs = renaming.get(v,[]) + renamed_is_type_of += new_vs + assert cell.knowntype 
is type + newcell = annmodel.SomeType() + if cell.is_constant(): + newcell.const = cell.const + cell = newcell + cell.is_type_of = renamed_is_type_of + + if hasattr(cell, 'knowntypedata'): + renamed_knowntypedata = {} + for (value, v), s in cell.knowntypedata.items(): + new_vs = renaming.get(v, []) + for new_v in new_vs: + renamed_knowntypedata[value, new_v] = s + assert isinstance(cell, annmodel.SomeBool) + newcell = annmodel.SomeBool() + if cell.is_constant(): + newcell.const = cell.const + cell = newcell + cell.set_knowntypedata(renamed_knowntypedata) + + cells.append(cell) + if ignore_link: + return + + if in_except_block: + last_exception_object.is_type_of = last_exc_value_vars + self.links_followed[link] = True + self.addpendingblock(graph, link.target, cells) + #___ creating the annotations based on operations ______ From noreply at buildbot.pypy.org Fri May 16 20:35:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 May 2014 20:35:51 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1769: trying to increase some limits in the SWEEPING phase. Now it Message-ID: <20140516183551.A0E101D23F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71541:8f47f3e1de79 Date: 2014-05-16 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/8f47f3e1de79/ Log: issue1769: trying to increase some limits in the SWEEPING phase. Now it should be guaranteed that most steps during this phase should walk (and possibly free) at least '3 * nursery_size' bytes. More precisely, that's all steps but two of them, at the end of the two halves of this phase. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1861,20 +1861,26 @@ #END MARKING elif self.gc_state == STATE_SWEEPING: # - # Walk all rawmalloced objects and free the ones that don't - # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. 
- limit = self.nursery_size // self.ac.page_size - remaining = self.free_unvisited_rawmalloc_objects_step(limit) - # - # Ask the ArenaCollection to visit a fraction of the objects. - # Free the ones that have not been visited above, and reset - # GCFLAG_VISITED on the others. Visit at most '3 * limit' - # pages minus the number of objects already visited above. - done = self.ac.mass_free_incremental(self._free_if_unvisited, - 2 * limit + remaining) + if self.raw_malloc_might_sweep.non_empty(): + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. + # This limit is conservatively high enough to guarantee that + # a total object size of at least '3 * nursery_size' bytes + # is processed. + limit = 3 * self.nursery_size // self.small_request_threshold + self.free_unvisited_rawmalloc_objects_step(limit) + done = False # the 2nd half below must still be done + else: + # Ask the ArenaCollection to visit a fraction of the objects. + # Free the ones that have not been visited above, and reset + # GCFLAG_VISITED on the others. Visit at most '3 * + # nursery_size' bytes. + limit = 3 * self.nursery_size // self.ac.page_size + done = self.ac.mass_free_incremental(self._free_if_unvisited, + limit) # XXX tweak the limits above # - if remaining > 0 and done: + if done: self.num_major_collects += 1 # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. 
From noreply at buildbot.pypy.org Fri May 16 22:15:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 16 May 2014 22:15:10 +0200 (CEST) Subject: [pypy-commit] pypy py3k: disable for now, it's causing obscure crashes: Message-ID: <20140516201510.1E3A91D240F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71542:4e7d3471a1c7 Date: 2014-05-16 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/4e7d3471a1c7/ Log: disable for now, it's causing obscure crashes: https://bitbucket.org/pypy/pypy/issue/1773 diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py --- a/pypy/module/_continuation/test/test_zpickle.py +++ b/pypy/module/_continuation/test/test_zpickle.py @@ -215,6 +215,7 @@ ''', mod.__dict__) def test_pickle_continulet_real_subclass(self): + skip("XXX: triggers a crash: https://bitbucket.org/pypy/pypy/issue/1773") import types, sys mod = types.ModuleType('test_pickle_continulet_real_subclass') sys.modules['test_pickle_continulet_real_subclass'] = mod From noreply at buildbot.pypy.org Fri May 16 23:15:04 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 16 May 2014 23:15:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix suspect usage of AnnotatorError Message-ID: <20140516211504.0AF161D23F2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r71543:88e8c347e2e2 Date: 2014-05-16 23:14 +0200 http://bitbucket.org/pypy/pypy/changeset/88e8c347e2e2/ Log: Fix suspect usage of AnnotatorError diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -14,6 +14,7 @@ from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel +from rpython.annotator.model import AnnotatorError def constpropagate(func, args_s, s_result): @@ -211,7 +212,7 @@ def builtin_tuple(s_iterable): if isinstance(s_iterable, 
SomeTuple): return s_iterable - return AnnotatorError("tuple(): argument must be another tuple") + raise AnnotatorError("tuple(): argument must be another tuple") def builtin_list(s_iterable): if isinstance(s_iterable, SomeList): From noreply at buildbot.pypy.org Sat May 17 01:04:43 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 01:04:43 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: A branch to implement dependent-typing in a less hackish way Message-ID: <20140516230443.CEE8D1D240F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71544:0ef88efb4a3d Date: 2014-05-16 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/0ef88efb4a3d/ Log: A branch to implement dependent-typing in a less hackish way From noreply at buildbot.pypy.org Sat May 17 01:04:45 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 01:04:45 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: create AnnotatedValue Message-ID: <20140516230445.107D61D240F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71545:b6f3e57eed80 Date: 2014-05-16 01:31 +0100 http://bitbucket.org/pypy/pypy/changeset/b6f3e57eed80/ Log: create AnnotatedValue diff --git a/rpython/annotator/value.py b/rpython/annotator/value.py new file mode 100644 --- /dev/null +++ b/rpython/annotator/value.py @@ -0,0 +1,9 @@ +""" AnnotatedValue """ + +class AnnotatedValue(object): + def __init__(self, value, annotation): + self.value = value + self.ann = annotation + + def __repr__(self): + return "AnnotatedValue(%s, %r)" % (self.value, self.ann) From noreply at buildbot.pypy.org Sat May 17 01:04:46 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 01:04:46 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: use annotated values in consider_op() Message-ID: <20140516230446.482821D240F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71546:bf8ea3ebe67a Date: 2014-05-16 20:01 
+0100 http://bitbucket.org/pypy/pypy/changeset/bf8ea3ebe67a/ Log: use annotated values in consider_op() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -10,6 +10,7 @@ c_last_exception, checkgraph) from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature +from rpython.annotator.value import AnnotatedValue from rpython.annotator.bookkeeper import Bookkeeper import py @@ -240,6 +241,9 @@ else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) + def annvalue(self, arg): + return AnnotatedValue(arg, self.binding(arg)) + def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) @@ -577,11 +581,10 @@ self.links_followed[link] = True self.addpendingblock(graph, link.target, cells) - #___ creating the annotations based on operations ______ def consider_op(self, op): - argcells = [self.binding(a) for a in op.args] + argcells = [self.annvalue(a) for a in op.args] # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the @@ -590,7 +593,7 @@ # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
# boom -- in the assert of setbinding() for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): + if isinstance(arg.ann, annmodel.SomeImpossibleValue): raise BlockedInference(self, op, -1) resultcell = op.consider(self, *argcells) if resultcell is None: diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -90,10 +90,7 @@ return None def consider(self, annotator, *argcells): - consider_meth = getattr(annotator, 'consider_op_' + self.opname, None) - if not consider_meth: - raise Exception("unknown op: %r" % op) - return consider_meth(*argcells) + raise NotImplementedError class PureOperation(HLOperation): pure = True @@ -141,15 +138,17 @@ dispatch = 1 def consider(self, annotator, arg, *other_args): - impl = getattr(arg, self.opname) - return impl(*other_args) + impl = getattr(arg.ann, self.opname) + s_others = [x.ann for x in other_args] + return impl(*s_others) class DoubleDispatchMixin(object): dispatch = 2 def consider(self, annotator, arg1, arg2, *other_args): - impl = getattr(pair(arg1, arg2), self.opname) - return impl(*other_args) + impl = getattr(pair(arg1.ann, arg2.ann), self.opname) + s_others = [arg.ann for arg in other_args] + return impl(*s_others) def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): @@ -374,7 +373,7 @@ # XXX "contains" clash with SomeObject method def consider(self, annotator, seq, elem): - return seq.op_contains(elem) + return seq.ann.op_contains(elem.ann) class NewDict(HLOperation): @@ -391,7 +390,7 @@ canraise = [] def consider(self, annotator, *args): - return SomeTuple(items=args) + return SomeTuple(items=[arg.ann for arg in args]) class NewList(HLOperation): @@ -399,7 +398,7 @@ canraise = [] def consider(self, annotator, *args): - return annotator.bookkeeper.newlist(*args) + return annotator.bookkeeper.newlist(*[arg.ann for arg in args]) class Pow(PureOperation): From noreply at 
buildbot.pypy.org Sat May 17 01:17:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 17 May 2014 01:17:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: workaround a py3k appdirect failure Message-ID: <20140516231722.2D2DE1C3272@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71547:5ee3c4bd1df3 Date: 2014-05-16 16:16 -0700 http://bitbucket.org/pypy/pypy/changeset/5ee3c4bd1df3/ Log: workaround a py3k appdirect failure diff --git a/pypy/module/zipimport/test/test_zipimport.py b/pypy/module/zipimport/test/test_zipimport.py --- a/pypy/module/zipimport/test/test_zipimport.py +++ b/pypy/module/zipimport/test/test_zipimport.py @@ -330,6 +330,9 @@ """ import os import zipimport + if self.appdirect: + # py3k's appdirect startup may populate _zip_directory_cache + zipimport._zip_directory_cache.clear() self.writefile("directory/package/__init__.py", "") importer = zipimport.zipimporter(self.zipfile + "/directory") l = [i for i in zipimport._zip_directory_cache] From noreply at buildbot.pypy.org Sat May 17 11:56:44 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:44 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Context.add() Message-ID: <20140517095644.9A30E1D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71548:3fd3d455666c Date: 2014-05-11 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/3fd3d455666c/ Log: Add Context.add() diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -210,6 +210,19 @@ self.capitals, rffi.cast(lltype.Signed, self.ctx.c_clamp), flags, traps)) + # Binary arithmetic functions + def binary_method(self, space, mpd_func, w_x, w_y): + from pypy.module._decimal.interp_decimal import W_Decimal + w_a, w_b = W_Decimal.convert_binop_raise(space, self, w_x, w_y) + w_result = 
W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + return w_result + + def add_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qadd, w_x, w_y) + + def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) @@ -235,6 +248,8 @@ clear_flags=interp2app(W_Context.clear_flags_w), clear_traps=interp2app(W_Context.clear_traps_w), create_decimal=interp2app(W_Context.create_decimal_w), + # Operations + add=interp2app(W_Context.add_w), ) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -175,7 +175,7 @@ # Operations @staticmethod - def convert_op(space, w_value, context): + def convert_op(space, context, w_value): if isinstance(w_value, W_Decimal): return None, w_value elif space.isinstance_w(w_value, space.w_int): @@ -184,19 +184,34 @@ exact=True) return space.w_NotImplemented, None - def convert_binop(self, space, w_other, context): - w_err, w_a = W_Decimal.convert_op(space, self, context) + @staticmethod + def convert_binop(space, context, w_x, w_y): + w_err, w_a = W_Decimal.convert_op(space, context, w_x) if w_err: return w_err, None, None - w_err, w_b = W_Decimal.convert_op(space, w_other, context) + w_err, w_b = W_Decimal.convert_op(space, context, w_y) if w_err: return w_err, None, None return None, w_a, w_b + @staticmethod + def convert_binop_raise(space, context, w_x, w_y): + w_err, w_a = W_Decimal.convert_op(space, context, w_x) + if w_err: + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_x)) + w_err, w_b = W_Decimal.convert_op(space, context, w_y) + if w_err: + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_y)) + return w_a, w_b + def binary_number_method(self, space, 
mpd_func, w_other): context = interp_context.getcontext(space) - w_err, w_a, w_b = self.convert_binop(space, w_other, context) + w_err, w_a, w_b = W_Decimal.convert_binop(space, context, self, w_other) if w_err: return w_err w_result = W_Decimal.allocate(space) @@ -279,7 +294,7 @@ # sign try: - sign = space.int_w(w_sign) + sign = space.int_w(w_sign, allow_conversion=False) except OperationError as e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -437,3 +437,17 @@ for d, n, r in test_triples: assert str(round(Decimal(d), n)) == r + def test_add(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.add(Decimal(1), Decimal(1)) + assert c.add(1, 1) == d + assert c.add(Decimal(1), 1) == d + assert c.add(1, Decimal(1)) == d + raises(TypeError, c.add, '1', 1) + raises(TypeError, c.add, 1, '1') + + + From noreply at buildbot.pypy.org Sat May 17 11:56:45 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:45 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: More Context operations Message-ID: <20140517095645.C17241D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71549:4c93df5ea053 Date: 2014-05-11 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/4c93df5ea053/ Log: More Context operations diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -221,7 +221,12 @@ def add_w(self, space, w_x, w_y): return self.binary_method(space, rmpdec.mpd_qadd, w_x, w_y) - + def subtract_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qsub, w_x, w_y) + def multiply_w(self, space, w_x, w_y): + return 
self.binary_method(space, rmpdec.mpd_qmul, w_x, w_y) + def divide_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qdiv, w_x, w_y) def descr_new_context(space, w_subtype, __args__): @@ -250,6 +255,9 @@ create_decimal=interp2app(W_Context.create_decimal_w), # Operations add=interp2app(W_Context.add_w), + subtract=interp2app(W_Context.subtract_w), + multiply=interp2app(W_Context.multiply_w), + divide=interp2app(W_Context.divide_w), ) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -449,5 +449,38 @@ raises(TypeError, c.add, '1', 1) raises(TypeError, c.add, 1, '1') - + def test_subtract(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + c = Context() + d = c.subtract(Decimal(1), Decimal(2)) + assert c.subtract(1, 2) == d + assert c.subtract(Decimal(1), 2) == d + assert c.subtract(1, Decimal(2)) == d + raises(TypeError, c.subtract, '1', 2) + raises(TypeError, c.subtract, 1, '2') + + def test_multiply(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.multiply(Decimal(1), Decimal(2)) + assert c.multiply(1, 2)== d + assert c.multiply(Decimal(1), 2)== d + assert c.multiply(1, Decimal(2))== d + raises(TypeError, c.multiply, '1', 2) + raises(TypeError, c.multiply, 1, '2') + + def test_divide(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.divide(Decimal(1), Decimal(2)) + assert c.divide(1, 2)== d + assert c.divide(Decimal(1), 2)== d + assert c.divide(1, Decimal(2))== d + raises(TypeError, c.divide, '1', 2) + raises(TypeError, c.divide, 1, '2') From noreply at buildbot.pypy.org Sat May 17 11:56:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:47 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Decimal binary operations, and all 
comparisons. Message-ID: <20140517095647.06B751D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71550:282b135bd0ee Date: 2014-05-11 19:00 +0200 http://bitbucket.org/pypy/pypy/changeset/282b135bd0ee/ Log: Add Decimal binary operations, and all comparisons. diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -212,9 +212,9 @@ # Binary arithmetic functions def binary_method(self, space, mpd_func, w_x, w_y): - from pypy.module._decimal.interp_decimal import W_Decimal - w_a, w_b = W_Decimal.convert_binop_raise(space, self, w_x, w_y) - w_result = W_Decimal.allocate(space) + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_x, w_y) + w_result = interp_decimal.W_Decimal.allocate(space) with self.catch_status(space) as (ctx, status_ptr): mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) return w_result diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -161,72 +161,153 @@ return w_result def compare(self, space, w_other, op): - if not isinstance(w_other, W_Decimal): # So far - return space.w_NotImplemented + context = interp_context.getcontext(space) + w_err, w_other = convert_op(space, context, w_other) + if w_err: + return w_err with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: r = rmpdec.mpd_qcmp(self.mpd, w_other.mpd, status_ptr) + + if r > 0xFFFF: + # sNaNs or op={le,ge,lt,gt} always signal. + if (rmpdec.mpd_issnan(self.mpd) or rmpdec.mpd_issnan(w_other.mpd) + or (op not in ('eq', 'ne'))): + status = rffi.cast(lltype.Signed, status_ptr[0]) + context.addstatus(space, status) + # qNaN comparison with op={eq,ne} or comparison with + # InvalidOperation disabled. 
+ if op == 'ne': + return space.w_True + else: + return space.w_False + if op == 'eq': return space.wrap(r == 0) + elif op == 'ne': + return space.wrap(r != 0) + elif op == 'le': + return space.wrap(r <= 0) + elif op == 'ge': + return space.wrap(r >= 0) + elif op == 'lt': + return space.wrap(r == -1) + elif op == 'gt': + return space.wrap(r == 1) else: return space.w_NotImplemented def descr_eq(self, space, w_other): return self.compare(space, w_other, 'eq') + def descr_ne(self, space, w_other): + return self.compare(space, w_other, 'ne') + def descr_lt(self, space, w_other): + return self.compare(space, w_other, 'lt') + def descr_le(self, space, w_other): + return self.compare(space, w_other, 'le') + def descr_gt(self, space, w_other): + return self.compare(space, w_other, 'gt') + def descr_ge(self, space, w_other): + return self.compare(space, w_other, 'ge') - # Operations - @staticmethod - def convert_op(space, context, w_value): - if isinstance(w_value, W_Decimal): - return None, w_value - elif space.isinstance_w(w_value, space.w_int): - value = space.bigint_w(w_value) - return None, decimal_from_bigint(space, None, value, context, - exact=True) - return space.w_NotImplemented, None + # Binary operations + + def descr_add(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qadd, self, w_other) + def descr_sub(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qsub, self, w_other) + def descr_mul(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qmul, self, w_other) + def descr_truediv(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qdiv, self, w_other) + def descr_floordiv(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qdivint, self, w_other) + def descr_mod(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qrem, self, w_other) + + def descr_radd(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qadd, w_other, 
self) + def descr_rsub(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qsub, w_other, self) + def descr_rmul(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qmul, w_other, self) + def descr_rtruediv(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qdiv, w_other, self) + def descr_rfloordiv(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qdivint, w_other, self) + def descr_rmod(self, space, w_other): + return binary_number_method(space, rmpdec.mpd_qrem, w_other, self) @staticmethod - def convert_binop(space, context, w_x, w_y): - w_err, w_a = W_Decimal.convert_op(space, context, w_x) + def divmod_impl(space, w_x, w_y): + context = interp_context.getcontext(space) + + w_err, w_a, w_b = convert_binop(space, context, w_x, w_y) if w_err: - return w_err, None, None - w_err, w_b = W_Decimal.convert_op(space, context, w_y) - if w_err: - return w_err, None, None - return None, w_a, w_b + return w_err + w_q = W_Decimal.allocate(space) + w_r = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qdivmod(w_q.mpd, w_r.mpd, w_a.mpd, w_b.mpd, + ctx, status_ptr) + return space.newtuple([w_q, w_r]) + + def descr_divmod(self, space, w_other): + return W_Decimal.divmod_impl(space, self, w_other) + def descr_rdivmod(self, space, w_other): + return W_Decimal.divmod_impl(space, w_other, self) @staticmethod - def convert_binop_raise(space, context, w_x, w_y): - w_err, w_a = W_Decimal.convert_op(space, context, w_x) - if w_err: - raise oefmt(space.w_TypeError, - "conversion from %N to Decimal is not supported", - space.type(w_x)) - w_err, w_b = W_Decimal.convert_op(space, context, w_y) - if w_err: - raise oefmt(space.w_TypeError, - "conversion from %N to Decimal is not supported", - space.type(w_y)) - return w_a, w_b - - def binary_number_method(self, space, mpd_func, w_other): + def pow_impl(space, w_base, w_exp, w_mod): context = 
interp_context.getcontext(space) - w_err, w_a, w_b = W_Decimal.convert_binop(space, context, self, w_other) + w_err, w_a, w_b = convert_binop(space, context, w_base, w_exp) if w_err: return w_err + + if not space.is_none(w_mod): + w_err, w_c = convert_op(space, context, w_mod) + if w_err: + return w_err + else: + w_c = None w_result = W_Decimal.allocate(space) with context.catch_status(space) as (ctx, status_ptr): - mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + if w_c: + rmpdec.mpd_qpowmod(w_result.mpd, w_a.mpd, w_b.mpd, w_c.mpd, + ctx, status_ptr) + else: + rmpdec.mpd_qpow(w_result.mpd, w_a.mpd, w_b.mpd, + ctx, status_ptr) return w_result - def descr_add(self, space, w_other): - return self.binary_number_method(space, rmpdec.mpd_qadd, w_other) - def descr_sub(self, space, w_other): - return self.binary_number_method(space, rmpdec.mpd_qsub, w_other) - def descr_mul(self, space, w_other): - return self.binary_number_method(space, rmpdec.mpd_qmul, w_other) - def descr_truediv(self, space, w_other): - return self.binary_number_method(space, rmpdec.mpd_qdiv, w_other) + def descr_pow(self, space, w_other, w_mod=None): + return W_Decimal.pow_impl(space, self, w_other, w_mod) + def descr_rpow(self, space, w_other): + return W_Decimal.pow_impl(space, w_other, self, None) + + # Unary operations + def unary_number_method(self, space, mpd_func): + context = interp_context.getcontext(space) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, self.mpd, ctx, status_ptr) + return w_result + + def descr_neg(self, space): + return self.unary_number_method(space, rmpdec.mpd_qminus) + def descr_pos(self, space): + return self.unary_number_method(space, rmpdec.mpd_qplus) + def descr_abs(self, space): + return self.unary_number_method(space, rmpdec.mpd_qabs) + + def copy_sign_w(self, space, w_other, w_context=None): + context = convert_context(space, w_context) + w_other = convert_op_raise(space, 
context, w_other) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qcopy_sign(w_result.mpd, self.mpd, w_other.mpd, + ctx, status_ptr) + return w_result # Boolean functions def is_qnan_w(self, space): @@ -235,6 +316,62 @@ return space.wrap(bool(rmpdec.mpd_isinfinite(self.mpd))) +# Helper functions for arithmetic conversions +def convert_op(space, context, w_value): + if isinstance(w_value, W_Decimal): + return None, w_value + elif space.isinstance_w(w_value, space.w_int): + value = space.bigint_w(w_value) + return None, decimal_from_bigint(space, None, value, context, + exact=True) + return space.w_NotImplemented, None + +def convert_op_raise(space, context, w_x): + w_err, w_a = convert_op(space, context, w_x) + if w_err: + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_x)) + return w_a + +def convert_binop(space, context, w_x, w_y): + w_err, w_a = convert_op(space, context, w_x) + if w_err: + return w_err, None, None + w_err, w_b = convert_op(space, context, w_y) + if w_err: + return w_err, None, None + return None, w_a, w_b + +def convert_binop_raise(space, context, w_x, w_y): + w_err, w_a = convert_op(space, context, w_x) + if w_err: + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_x)) + w_err, w_b = convert_op(space, context, w_y) + if w_err: + raise oefmt(space.w_TypeError, + "conversion from %N to Decimal is not supported", + space.type(w_y)) + return w_a, w_b + +def binary_number_method(space, mpd_func, w_x, w_y): + context = interp_context.getcontext(space) + + w_err, w_a, w_b = convert_binop(space, context, w_x, w_y) + if w_err: + return w_err + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + return w_result + +def convert_context(space, w_context): + if w_context is None: + return 
interp_context.getcontext(space) + return space.interp_w(interp_context.W_Context, w_context) + # Constructors def decimal_from_ssize(space, w_subtype, value, context, exact=True): w_result = W_Decimal.allocate(space, w_subtype) @@ -473,13 +610,37 @@ __floor__ = interp2app(W_Decimal.descr_floor), __ceil__ = interp2app(W_Decimal.descr_ceil), __round__ = interp2app(W_Decimal.descr_round), + # __eq__ = interp2app(W_Decimal.descr_eq), + __ne__ = interp2app(W_Decimal.descr_ne), + __le__ = interp2app(W_Decimal.descr_le), + __ge__ = interp2app(W_Decimal.descr_ge), + __lt__ = interp2app(W_Decimal.descr_lt), + __gt__ = interp2app(W_Decimal.descr_gt), + # + __pos__ = interp2app(W_Decimal.descr_pos), + __neg__ = interp2app(W_Decimal.descr_neg), + __abs__ = interp2app(W_Decimal.descr_abs), # __add__ = interp2app(W_Decimal.descr_add), __sub__ = interp2app(W_Decimal.descr_sub), __mul__ = interp2app(W_Decimal.descr_mul), __truediv__ = interp2app(W_Decimal.descr_truediv), + __floordiv__ = interp2app(W_Decimal.descr_floordiv), + __mod__ = interp2app(W_Decimal.descr_mod), + __divmod__ = interp2app(W_Decimal.descr_divmod), + __pow__ = interp2app(W_Decimal.descr_pow), # + __radd__ = interp2app(W_Decimal.descr_radd), + __rsub__ = interp2app(W_Decimal.descr_rsub), + __rmul__ = interp2app(W_Decimal.descr_rmul), + __rtruediv__ = interp2app(W_Decimal.descr_rtruediv), + __rfloordiv__ = interp2app(W_Decimal.descr_rfloordiv), + __rmod__ = interp2app(W_Decimal.descr_rmod), + __rdivmod__ = interp2app(W_Decimal.descr_rdivmod), + __rpow__ = interp2app(W_Decimal.descr_rpow), + # + copy_sign = interp2app(W_Decimal.copy_sign_w), is_qnan = interp2app(W_Decimal.is_qnan_w), is_infinite = interp2app(W_Decimal.is_infinite_w), ) diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -1,3 +1,6 @@ +from pypy.interpreter import gateway +import random + class 
AppTestContext: spaceconfig = dict(usemodules=('_decimal',)) @@ -6,6 +9,10 @@ cls.w_decimal = space.call_function(space.builtin.get('__import__'), space.wrap("_decimal")) cls.w_Decimal = space.getattr(cls.w_decimal, space.wrap("Decimal")) + def random_float(space): + f = random.expovariate(0.01) * (random.random() * 2.0 - 1.0) + return space.wrap(f) + cls.w_random_float = space.wrap(gateway.interp2app(random_float)) def test_context_repr(self): c = self.decimal.DefaultContext.copy() @@ -31,3 +38,73 @@ "flags=[], traps=[])" assert s == t + def test_explicit_context_create_from_float(self): + Decimal = self.decimal.Decimal + + nc = self.decimal.Context() + r = nc.create_decimal(0.1) + assert type(r) is Decimal + assert str(r) == '0.1000000000000000055511151231' + assert nc.create_decimal(float('nan')).is_qnan() + assert nc.create_decimal(float('inf')).is_infinite() + assert nc.create_decimal(float('-inf')).is_infinite() + assert (str(nc.create_decimal(float('nan'))) == + str(nc.create_decimal('NaN'))) + assert (str(nc.create_decimal(float('inf'))) == + str(nc.create_decimal('Infinity'))) + assert (str(nc.create_decimal(float('-inf'))) == + str(nc.create_decimal('-Infinity'))) + assert (str(nc.create_decimal(float('-0.0'))) == + str(nc.create_decimal('-0'))) + nc.prec = 100 + for i in range(200): + x = self.random_float() + assert x == float(nc.create_decimal(x)) # roundtrip + + def test_add(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.add(Decimal(1), Decimal(1)) + assert c.add(1, 1) == d + assert c.add(Decimal(1), 1) == d + assert c.add(1, Decimal(1)) == d + raises(TypeError, c.add, '1', 1) + raises(TypeError, c.add, 1, '1') + + def test_subtract(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.subtract(Decimal(1), Decimal(2)) + assert c.subtract(1, 2) == d + assert c.subtract(Decimal(1), 2) == d + assert c.subtract(1, Decimal(2)) == d + raises(TypeError, 
c.subtract, '1', 2) + raises(TypeError, c.subtract, 1, '2') + + def test_multiply(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.multiply(Decimal(1), Decimal(2)) + assert c.multiply(1, 2)== d + assert c.multiply(Decimal(1), 2)== d + assert c.multiply(1, Decimal(2))== d + raises(TypeError, c.multiply, '1', 2) + raises(TypeError, c.multiply, 1, '2') + + def test_divide(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.divide(Decimal(1), Decimal(2)) + assert c.divide(1, 2)== d + assert c.divide(Decimal(1), 2)== d + assert c.divide(1, Decimal(2))== d + raises(TypeError, c.divide, '1', 2) + raises(TypeError, c.divide, 1, '2') diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -293,29 +293,6 @@ assert str(nc.create_decimal(Decimal('NaN12345'))) == 'NaN' assert nc.flags[InvalidOperation] - def test_explicit_context_create_from_float(self): - Decimal = self.decimal.Decimal - - nc = self.decimal.Context() - r = nc.create_decimal(0.1) - assert type(r) is Decimal - assert str(r) == '0.1000000000000000055511151231' - assert nc.create_decimal(float('nan')).is_qnan() - assert nc.create_decimal(float('inf')).is_infinite() - assert nc.create_decimal(float('-inf')).is_infinite() - assert (str(nc.create_decimal(float('nan'))) == - str(nc.create_decimal('NaN'))) - assert (str(nc.create_decimal(float('inf'))) == - str(nc.create_decimal('Infinity'))) - assert (str(nc.create_decimal(float('-inf'))) == - str(nc.create_decimal('-Infinity'))) - assert (str(nc.create_decimal(float('-0.0'))) == - str(nc.create_decimal('-0'))) - nc.prec = 100 - for i in range(200): - x = self.random_float() - assert x == float(nc.create_decimal(x)) # roundtrip - def test_operations(self): Decimal = self.decimal.Decimal @@ -437,50 +414,301 @@ for d, n, r in 
test_triples: assert str(round(Decimal(d), n)) == r - def test_add(self): + def test_addition(self): Decimal = self.decimal.Decimal - Context = self.decimal.Context - c = Context() - d = c.add(Decimal(1), Decimal(1)) - assert c.add(1, 1) == d - assert c.add(Decimal(1), 1) == d - assert c.add(1, Decimal(1)) == d - raises(TypeError, c.add, '1', 1) - raises(TypeError, c.add, 1, '1') + d1 = Decimal('-11.1') + d2 = Decimal('22.2') - def test_subtract(self): + #two Decimals + assert d1+d2 == Decimal('11.1') + assert d2+d1 == Decimal('11.1') + + #with other type, left + c = d1 + 5 + assert c == Decimal('-6.1') + assert type(c) == type(d1) + + #with other type, right + c = 5 + d1 + assert c == Decimal('-6.1') + assert type(c) == type(d1) + + #inline with decimal + d1 += d2 + assert d1 == Decimal('11.1') + + #inline with other type + d1 += 5 + assert d1 == Decimal('16.1') + + def test_subtraction(self): Decimal = self.decimal.Decimal - Context = self.decimal.Context - c = Context() - d = c.subtract(Decimal(1), Decimal(2)) - assert c.subtract(1, 2) == d - assert c.subtract(Decimal(1), 2) == d - assert c.subtract(1, Decimal(2)) == d - raises(TypeError, c.subtract, '1', 2) - raises(TypeError, c.subtract, 1, '2') + d1 = Decimal('-11.1') + d2 = Decimal('22.2') - def test_multiply(self): + #two Decimals + assert d1-d2 == Decimal('-33.3') + assert d2-d1 == Decimal('33.3') + + #with other type, left + c = d1 - 5 + assert c == Decimal('-16.1') + assert type(c) == type(d1) + + #with other type, right + c = 5 - d1 + assert c == Decimal('16.1') + assert type(c) == type(d1) + + #inline with decimal + d1 -= d2 + assert d1 == Decimal('-33.3') + + #inline with other type + d1 -= 5 + assert d1 == Decimal('-38.3') + + def test_multiplication(self): Decimal = self.decimal.Decimal - Context = self.decimal.Context - c = Context() - d = c.multiply(Decimal(1), Decimal(2)) - assert c.multiply(1, 2)== d - assert c.multiply(Decimal(1), 2)== d - assert c.multiply(1, Decimal(2))== d - 
raises(TypeError, c.multiply, '1', 2) - raises(TypeError, c.multiply, 1, '2') + d1 = Decimal('-5') + d2 = Decimal('3') - def test_divide(self): + #two Decimals + assert d1*d2 == Decimal('-15') + assert d2*d1 == Decimal('-15') + + #with other type, left + c = d1 * 5 + assert c == Decimal('-25') + assert type(c) == type(d1) + + #with other type, right + c = 5 * d1 + assert c == Decimal('-25') + assert type(c) == type(d1) + + #inline with decimal + d1 *= d2 + assert d1 == Decimal('-15') + + #inline with other type + d1 *= 5 + assert d1 == Decimal('-75') + + def test_division(self): Decimal = self.decimal.Decimal - Context = self.decimal.Context - c = Context() - d = c.divide(Decimal(1), Decimal(2)) - assert c.divide(1, 2)== d - assert c.divide(Decimal(1), 2)== d - assert c.divide(1, Decimal(2))== d - raises(TypeError, c.divide, '1', 2) - raises(TypeError, c.divide, 1, '2') + d1 = Decimal('-5') + d2 = Decimal('2') + + #two Decimals + assert d1/d2 == Decimal('-2.5') + assert d2/d1 == Decimal('-0.4') + + #with other type, left + c = d1 / 4 + assert c == Decimal('-1.25') + assert type(c) == type(d1) + + #with other type, right + c = 4 / d1 + assert c == Decimal('-0.8') + assert type(c) == type(d1) + + #inline with decimal + d1 /= d2 + assert d1 == Decimal('-2.5') + + #inline with other type + d1 /= 4 + assert d1 == Decimal('-0.625') + + def test_floor_division(self): + Decimal = self.decimal.Decimal + + d1 = Decimal('5') + d2 = Decimal('2') + + #two Decimals + assert d1//d2 == Decimal('2') + assert d2//d1 == Decimal('0') + + #with other type, left + c = d1 // 4 + assert c == Decimal('1') + assert type(c) == type(d1) + + #with other type, right + c = 7 // d1 + assert c == Decimal('1') + assert type(c) == type(d1) + + #inline with decimal + d1 //= d2 + assert d1 == Decimal('2') + + #inline with other type + d1 //= 2 + assert d1 == Decimal('1') + + def test_powering(self): + Decimal = self.decimal.Decimal + + d1 = Decimal('5') + d2 = Decimal('2') + + #two Decimals + assert 
d1**d2 == Decimal('25') + assert d2**d1 == Decimal('32') + + #with other type, left + c = d1 ** 4 + assert c == Decimal('625') + assert type(c) == type(d1) + + #with other type, right + c = 7 ** d1 + assert c == Decimal('16807') + assert type(c) == type(d1) + + #inline with decimal + d1 **= d2 + assert d1 == Decimal('25') + + #inline with other type + d1 **= 4 + assert d1 == Decimal('390625') + + def test_module(self): + Decimal = self.decimal.Decimal + + d1 = Decimal('5') + d2 = Decimal('2') + + #two Decimals + assert d1%d2 == Decimal('1') + assert d2%d1 == Decimal('2') + + #with other type, left + c = d1 % 4 + assert c == Decimal('1') + assert type(c) == type(d1) + + #with other type, right + c = 7 % d1 + assert c == Decimal('2') + assert type(c) == type(d1) + + #inline with decimal + d1 %= d2 + assert d1 == Decimal('1') + + #inline with other type + d1 %= 4 + assert d1 == Decimal('1') + + def test_floor_div_module(self): + Decimal = self.decimal.Decimal + + d1 = Decimal('5') + d2 = Decimal('2') + + #two Decimals + (p, q) = divmod(d1, d2) + assert p == Decimal('2') + assert q == Decimal('1') + assert type(p) == type(d1) + assert type(q) == type(d1) + + #with other type, left + (p, q) = divmod(d1, 4) + assert p == Decimal('1') + assert q == Decimal('1') + assert type(p) == type(d1) + assert type(q) == type(d1) + + #with other type, right + (p, q) = divmod(7, d1) + assert p == Decimal('1') + assert q == Decimal('2') + assert type(p) == type(d1) + assert type(q) == type(d1) + + def test_unary_operators(self): + Decimal = self.decimal.Decimal + + assert +Decimal(45) == Decimal(+45) + assert -Decimal(45) == Decimal(-45) + assert abs(Decimal(45)) == abs(Decimal(-45)) + + def test_nan_comparisons(self): + import operator + # comparisons involving signaling nans signal InvalidOperation + + # order comparisons (<, <=, >, >=) involving only quiet nans + # also signal InvalidOperation + + # equality comparisons (==, !=) involving only quiet nans + # don't signal, but return 
False or True respectively. + Decimal = self.decimal.Decimal + InvalidOperation = self.decimal.InvalidOperation + Overflow = self.decimal.Overflow + DivisionByZero = self.decimal.DivisionByZero + localcontext = self.decimal.localcontext + + self.decimal.getcontext().traps[InvalidOperation] = False + self.decimal.getcontext().traps[Overflow] = False + self.decimal.getcontext().traps[DivisionByZero] = False + + n = Decimal('NaN') + s = Decimal('sNaN') + i = Decimal('Inf') + f = Decimal('2') + + qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n) + snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s) + order_ops = operator.lt, operator.le, operator.gt, operator.ge + equality_ops = operator.eq, operator.ne + + # results when InvalidOperation is not trapped + for x, y in qnan_pairs + snan_pairs: + for op in order_ops + equality_ops: + got = op(x, y) + expected = True if op is operator.ne else False + assert expected is got, ( + "expected {0!r} for operator.{1}({2!r}, {3!r}); " + "got {4!r}".format( + expected, op.__name__, x, y, got)) + + # repeat the above, but this time trap the InvalidOperation + with localcontext() as ctx: + ctx.traps[InvalidOperation] = 1 + + for x, y in qnan_pairs: + for op in equality_ops: + got = op(x, y) + expected = True if op is operator.ne else False + assert expected is got, ( + "expected {0!r} for " + "operator.{1}({2!r}, {3!r}); " + "got {4!r}".format( + expected, op.__name__, x, y, got)) + + for x, y in snan_pairs: + for op in equality_ops: + raises(InvalidOperation, operator.eq, x, y) + raises(InvalidOperation, operator.ne, x, y) + + for x, y in qnan_pairs + snan_pairs: + for op in order_ops: + raises(InvalidOperation, op, x, y) + + def test_copy_sign(self): + Decimal = self.decimal.Decimal + + d = Decimal(1).copy_sign(Decimal(-2)) + assert Decimal(1).copy_sign(-2) == d + raises(TypeError, Decimal(1).copy_sign, '-2') diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ 
b/rpython/rlib/rmpdec.py @@ -47,7 +47,10 @@ "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", "mpd_qquantize", - "mpd_qpow", "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", + "mpd_qplus", "mpd_qminus", "mpd_qabs", + "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", "mpd_qdivint", + "mpd_qrem", "mpd_qdivmod", "mpd_qpow", "mpd_qpowmod", + "mpd_qcopy_sign", "mpd_qround_to_int", ], compile_extra=compile_extra, @@ -221,9 +224,17 @@ 'mpd_qquantize', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) -mpd_qpow = external( - 'mpd_qpow', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], +mpd_qplus = external( + 'mpd_qplus', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qminus = external( + 'mpd_qminus', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qabs = external( + 'mpd_qabs', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qadd = external( 'mpd_qadd', @@ -241,6 +252,30 @@ 'mpd_qdiv', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qdivint = external( + 'mpd_qdivint', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qrem = external( + 'mpd_qrem', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qdivmod = external( + 'mpd_qdivmod', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qpow = external( + 'mpd_qpow', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qpowmod = external( + 'mpd_qpowmod', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) +mpd_qcopy_sign = external( + 'mpd_qcopy_sign', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) mpd_qround_to_int = external( 'mpd_qround_to_int', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], From noreply at buildbot.pypy.org Sat May 17 11:56:48 
2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:48 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Decimal.as_tuple() Message-ID: <20140517095648.2B32A1D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71551:b6fe176810c7 Date: 2014-05-11 20:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b6fe176810c7/ Log: Add Decimal.as_tuple() diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -61,6 +61,10 @@ w_MutableMapping]), space.newdict()) + self.W_DecimalTuple = space.call_method( + w_collections, "namedtuple", + space.wrap("DecimalTuple"), space.wrap("sign digits exponent")) + def state_get(space): return space.fromcache(State) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -315,6 +315,53 @@ def is_infinite_w(self, space): return space.wrap(bool(rmpdec.mpd_isinfinite(self.mpd))) + def as_tuple_w(self, space): + "Return the DecimalTuple representation of a Decimal" + w_sign = space.wrap(rmpdec.mpd_sign(self.mpd)) + if rmpdec.mpd_isinfinite(self.mpd): + w_expt = space.wrap("F") + # decimal.py has non-compliant infinity payloads. + w_coeff = space.newtuple([space.wrap(0)]) + else: + if rmpdec.mpd_isnan(self.mpd): + if rmpdec.mpd_issnan(self.mpd): + w_expt = space.wrap("N") + else: + w_expt = space.wrap("n") + else: + w_expt = space.wrap(self.mpd.c_exp) + + if self.mpd.c_len > 0: + # coefficient is defined + + # make an integer + # XXX this should be done in C... 
+ x = rmpdec.mpd_qncopy(self.mpd) + if not x: + raise OperationError(space.w_MemoryError, space.w_None) + try: + x.c_exp = 0 + # clear NaN and sign + rmpdec.mpd_clear_flags(x) + intstring = rmpdec.mpd_to_sci(x, 1) + finally: + rmpdec.mpd_del(x) + if not intstring: + raise OperationError(space.w_MemoryError, space.w_None) + try: + digits = rffi.charp2str(intstring) + finally: + rmpdec.mpd_free(intstring) + w_coeff = space.newtuple([ + space.wrap(ord(d) - ord('0')) + for d in digits]) + else: + w_coeff = space.newtuple([]) + + return space.call_function( + interp_context.state_get(space).W_DecimalTuple, + w_sign, w_coeff, w_expt) + # Helper functions for arithmetic conversions def convert_op(space, context, w_value): @@ -643,4 +690,6 @@ copy_sign = interp2app(W_Decimal.copy_sign_w), is_qnan = interp2app(W_Decimal.is_qnan_w), is_infinite = interp2app(W_Decimal.is_infinite_w), + # + as_tuple = interp2app(W_Decimal.as_tuple_w), ) diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -712,3 +712,49 @@ d = Decimal(1).copy_sign(Decimal(-2)) assert Decimal(1).copy_sign(-2) == d raises(TypeError, Decimal(1).copy_sign, '-2') + + def test_as_tuple(self): + Decimal = self.decimal.Decimal + + #with zero + d = Decimal(0) + assert d.as_tuple() == (0, (0,), 0) + + #int + d = Decimal(-45) + assert d.as_tuple() == (1, (4, 5), 0) + + #complicated string + d = Decimal("-4.34913534E-17") + assert d.as_tuple() == (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) + + # The '0' coefficient is implementation specific to decimal.py. + # It has no meaning in the C-version and is ignored there. 
+ d = Decimal("Infinity") + assert d.as_tuple() == (0, (0,), 'F') + + #leading zeros in coefficient should be stripped + d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) ) + assert d.as_tuple() == (0, (4, 0, 5, 3, 4), -2) + d = Decimal( (1, (0, 0, 0), 37) ) + assert d.as_tuple() == (1, (0,), 37) + d = Decimal( (1, (), 37) ) + assert d.as_tuple() == (1, (0,), 37) + + #leading zeros in NaN diagnostic info should be stripped + d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') ) + assert d.as_tuple() == (0, (4, 0, 5, 3, 4), 'n') + d = Decimal( (1, (0, 0, 0), 'N') ) + assert d.as_tuple() == (1, (), 'N') + d = Decimal( (1, (), 'n') ) + assert d.as_tuple() == (1, (), 'n') + + # For infinities, decimal.py has always silently accepted any + # coefficient tuple. + d = Decimal( (0, (0,), 'F') ) + assert d.as_tuple() == (0, (0,), 'F') + d = Decimal( (0, (4, 5, 3, 4), 'F') ) + assert d.as_tuple() == (0, (0,), 'F') + d = Decimal( (1, (0, 2, 7, 1), 'F') ) + assert d.as_tuple() == (1, (0,), 'F') + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -36,13 +36,14 @@ libdir.join('memory.c'), ], export_symbols=[ - "mpd_qset_ssize", "mpd_qset_uint", "mpd_qset_string", "mpd_qcopy", "mpd_setspecial", + "mpd_qset_ssize", "mpd_qset_uint", "mpd_qset_string", + "mpd_qcopy", "mpd_qncopy", "mpd_setspecial", "mpd_clear_flags", "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", - "mpd_set_sign", "mpd_qfinalize", + "mpd_set_sign", "mpd_sign", "mpd_qfinalize", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", - "mpd_qnew", + "mpd_qnew", "mpd_del", "mpd_to_sci", "mpd_to_sci_size", "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", @@ -156,10 +157,16 @@ MPD_PTR, rffi.UINTP], rffi.SIZE_T) mpd_qcopy = external( 'mpd_qcopy', [MPD_PTR, MPD_PTR, rffi.UINTP], 
rffi.INT) +mpd_qncopy = external( + 'mpd_qncopy', [MPD_PTR], MPD_PTR) mpd_setspecial = external( 'mpd_setspecial', [MPD_PTR, rffi.UCHAR, rffi.UCHAR], lltype.Void) mpd_set_sign = external( 'mpd_set_sign', [MPD_PTR, rffi.UCHAR], lltype.Void) +mpd_clear_flags = external( + 'mpd_clear_flags', [MPD_PTR], lltype.Void) +mpd_sign = external( + 'mpd_sign', [MPD_PTR], rffi.UCHAR) mpd_qfinalize = external( 'mpd_qfinalize', [MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) @@ -191,6 +198,8 @@ mpd_qnew = external( 'mpd_qnew', [], MPD_PTR) +mpd_del = external( + 'mpd_del', [MPD_PTR], lltype.Void) mpd_free = external( 'mpd_free', [rffi.VOIDP], lltype.Void, macro=True) From noreply at buildbot.pypy.org Sat May 17 11:56:49 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:49 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Lot of new Context operations Message-ID: <20140517095649.5B77B1D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71552:3b950bcc282d Date: 2014-05-12 23:13 +0200 http://bitbucket.org/pypy/pypy/changeset/3b950bcc282d/ Log: Lot of new Context operations diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -214,6 +214,42 @@ self.capitals, rffi.cast(lltype.Signed, self.ctx.c_clamp), flags, traps)) + # Unary arithmetic functions + def unary_method(self, space, mpd_func, w_x): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_x) + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, ctx, status_ptr) + return w_result + + def abs_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qabs, w_x) + def exp_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qexp, w_x) + def ln_w(self, 
space, w_x): + return self.unary_method(space, rmpdec.mpd_qln, w_x) + def log10_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qlog10, w_x) + def minus_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qminus, w_x) + def next_minus_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qnext_minus, w_x) + def next_plus_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qnext_plus, w_x) + def normalize_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qreduce, w_x) + def plus_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qplus, w_x) + def to_integral_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qround_to_int, w_x) + def to_integral_exact_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qround_to_intx, w_x) + def to_integral_value_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qround_to_int, w_x) + def sqrt_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qsqrt, w_x) + # Binary arithmetic functions def binary_method(self, space, mpd_func, w_x, w_y): from pypy.module._decimal import interp_decimal @@ -231,7 +267,60 @@ return self.binary_method(space, rmpdec.mpd_qmul, w_x, w_y) def divide_w(self, space, w_x, w_y): return self.binary_method(space, rmpdec.mpd_qdiv, w_x, w_y) + def compare_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qcompare, w_x, w_y) + def compare_signal_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qcompare_signal, w_x, w_y) + def divide_int_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qdivint, w_x, w_y) + def divmod_w(self, space, w_x, w_y): + from pypy.module._decimal import interp_decimal + return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) + def max_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qmax, w_x, w_y) + def max_mag_w(self, space, w_x, w_y): + return 
self.binary_method(space, rmpdec.mpd_qmax_mag, w_x, w_y) + def min_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qmin, w_x, w_y) + def min_mag_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qmin_mag, w_x, w_y) + def next_toward_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qnext_toward, w_x, w_y) + def quantize_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qquantize, w_x, w_y) + def remainder_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qrem, w_x, w_y) + def remainder_near_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qrem_near, w_x, w_y) + # Ternary operations + def power_w(self, space, w_a, w_b, w_modulo=None): + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_a, w_b) + if not space.is_none(w_modulo): + w_modulo = interp_decimal.convert_op_raise(space, self, w_modulo) + else: + w_modulo = None + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + if w_modulo: + rmpdec.mpd_qpowmod(w_result.mpd, w_a.mpd, w_b.mpd, w_modulo.mpd, + ctx, status_ptr) + else: + rmpdec.mpd_qpow(w_result.mpd, w_a.mpd, w_b.mpd, + ctx, status_ptr) + return w_result + + def fma_w(self, space, w_v, w_w, w_x): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, self, w_v) + w_b = interp_decimal.convert_op_raise(space, self, w_w) + w_c = interp_decimal.convert_op_raise(space, self, w_x) + w_result = interp_decimal.W_Decimal.allocate(space) + with self.catch_status(space) as (ctx, status_ptr): + rmpdec.mpd_qfma(w_result.mpd, w_a.mpd, w_b.mpd, w_c.mpd, + ctx, status_ptr) + return w_result def descr_new_context(space, w_subtype, __args__): w_result = space.allocate_instance(W_Context, w_subtype) @@ -257,11 +346,40 @@ clear_flags=interp2app(W_Context.clear_flags_w), 
clear_traps=interp2app(W_Context.clear_traps_w), create_decimal=interp2app(W_Context.create_decimal_w), - # Operations + # Unary Operations + abs=interp2app(W_Context.abs_w), + exp=interp2app(W_Context.exp_w), + ln=interp2app(W_Context.ln_w), + log10=interp2app(W_Context.log10_w), + minus=interp2app(W_Context.minus_w), + next_minus=interp2app(W_Context.next_minus_w), + next_plus=interp2app(W_Context.next_plus_w), + normalize=interp2app(W_Context.normalize_w), + plus=interp2app(W_Context.plus_w), + to_integral=interp2app(W_Context.to_integral_w), + to_integral_exact=interp2app(W_Context.to_integral_exact_w), + to_integral_value=interp2app(W_Context.to_integral_value_w), + sqrt=interp2app(W_Context.sqrt_w), + # Binary Operations add=interp2app(W_Context.add_w), subtract=interp2app(W_Context.subtract_w), multiply=interp2app(W_Context.multiply_w), divide=interp2app(W_Context.divide_w), + compare=interp2app(W_Context.compare_w), + compare_signal=interp2app(W_Context.compare_signal_w), + divide_int=interp2app(W_Context.divide_int_w), + divmod=interp2app(W_Context.divmod_w), + max=interp2app(W_Context.max_w), + max_mag=interp2app(W_Context.max_mag_w), + min=interp2app(W_Context.min_w), + min_mag=interp2app(W_Context.min_mag_w), + next_toward=interp2app(W_Context.next_toward_w), + quantize=interp2app(W_Context.quantize_w), + remainder=interp2app(W_Context.remainder_w), + remainder_near=interp2app(W_Context.remainder_near_w), + # Ternary operations + power=interp2app(W_Context.power_w), + fma=interp2app(W_Context.fma_w), ) diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -238,9 +238,7 @@ return binary_number_method(space, rmpdec.mpd_qrem, w_other, self) @staticmethod - def divmod_impl(space, w_x, w_y): - context = interp_context.getcontext(space) - + def divmod_impl(space, context, w_x, w_y): w_err, w_a, w_b = convert_binop(space, context, 
w_x, w_y) if w_err: return w_err @@ -252,9 +250,11 @@ return space.newtuple([w_q, w_r]) def descr_divmod(self, space, w_other): - return W_Decimal.divmod_impl(space, self, w_other) + context = interp_context.getcontext(space) + return W_Decimal.divmod_impl(space, context, self, w_other) def descr_rdivmod(self, space, w_other): - return W_Decimal.divmod_impl(space, w_other, self) + context = interp_context.getcontext(space) + return W_Decimal.divmod_impl(space, context, w_other, self) @staticmethod def pow_impl(space, w_base, w_exp, w_mod): diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -47,12 +47,17 @@ "mpd_to_sci", "mpd_to_sci_size", "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", - "mpd_qcmp", "mpd_qquantize", + "mpd_qcmp", "mpd_qcompare", "mpd_qcompare_signal", + "mpd_qmin", "mpd_qmax", "mpd_qmin_mag", "mpd_qmax_mag", + "mpd_qnext_minus", "mpd_qnext_plus", "mpd_qnext_toward", + "mpd_qquantize", "mpd_qreduce", "mpd_qplus", "mpd_qminus", "mpd_qabs", "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", "mpd_qdivint", - "mpd_qrem", "mpd_qdivmod", "mpd_qpow", "mpd_qpowmod", + "mpd_qrem", "mpd_qrem_near", "mpd_qdivmod", "mpd_qpow", "mpd_qpowmod", + "mpd_qfma", + "mpd_qexp", "mpd_qln", "mpd_qlog10", "mpd_qsqrt", "mpd_qcopy_sign", - "mpd_qround_to_int", + "mpd_qround_to_int", "mpd_qround_to_intx", ], compile_extra=compile_extra, libraries=['m'], @@ -229,9 +234,40 @@ 'mpd_isqnan', [MPD_PTR], rffi.INT) mpd_qcmp = external( 'mpd_qcmp', [MPD_PTR, MPD_PTR, rffi.UINTP], rffi.INT) +mpd_qcompare = external( + 'mpd_qcompare', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qcompare_signal = external( + 'mpd_qcompare_signal', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) + +mpd_qmin = external( + 'mpd_qmin', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qmax = 
external( + 'mpd_qmax', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qmin_mag = external( + 'mpd_qmin_mag', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qmax_mag = external( + 'mpd_qmax_mag', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qnext_minus = external( + 'mpd_qnext_minus', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qnext_plus = external( + 'mpd_qnext_plus', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qnext_toward = external( + 'mpd_qnext_toward', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qquantize = external( - 'mpd_qquantize', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + 'mpd_qquantize', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qreduce = external( + 'mpd_qreduce', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qplus = external( 'mpd_qplus', @@ -251,36 +287,51 @@ lltype.Void) mpd_qsub = external( 'mpd_qsub', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qmul = external( 'mpd_qmul', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qdiv = external( 'mpd_qdiv', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qdivint = external( 'mpd_qdivint', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qrem = external( 'mpd_qrem', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qrem_near = 
external( + 'mpd_qrem_near', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qdivmod = external( 'mpd_qdivmod', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qpow = external( 'mpd_qpow', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], - lltype.Void) + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qpowmod = external( 'mpd_qpowmod', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qfma = external( + 'mpd_qfma', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) + +mpd_qexp = external( + 'mpd_qexp', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qln = external( + 'mpd_qln', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qlog10 = external( + 'mpd_qlog10', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qsqrt = external( + 'mpd_qsqrt', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) + mpd_qcopy_sign = external( 'mpd_qcopy_sign', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], @@ -289,3 +340,6 @@ mpd_qround_to_int = external( 'mpd_qround_to_int', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qround_to_intx = external( + 'mpd_qround_to_intx', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + lltype.Void) From noreply at buildbot.pypy.org Sat May 17 11:56:50 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:50 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Tranlation fixes Message-ID: <20140517095650.810CA1D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71553:3d5dc83a95a4 Date: 2014-05-16 21:57 +0200 http://bitbucket.org/pypy/pypy/changeset/3d5dc83a95a4/ Log: Tranlation fixes diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ 
b/pypy/interpreter/pyopcode.py @@ -1548,7 +1548,7 @@ if space.isinstance_w(w_source, space.w_unicode): from pypy.interpreter.unicodehelper import encode - w_source = encode(space, w_source) + w_source = encode(space, w_source, 'utf-8') source = space.bytes0_w(w_source) flags |= consts.PyCF_IGNORE_COOKIE elif space.isinstance_w(w_source, space.w_bytes): diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -18,6 +18,9 @@ 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', 'MAX_PREC': 'space.wrap(interp_decimal.MAX_PREC)', + 'MAX_EMAX': 'space.wrap(interp_decimal.MAX_EMAX)', + 'MAX_EMIN': 'space.wrap(interp_decimal.MAX_EMIN)', + 'MAX_ETINY': 'space.wrap(interp_decimal.MAX_ETINY)', } for name in rmpdec.ROUND_CONSTANTS: interpleveldefs[name] = 'space.wrap(%r)' % name diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -13,6 +13,10 @@ IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS MAX_PREC = rmpdec.MPD_MAX_PREC +MAX_EMAX = rmpdec.MPD_MAX_EMAX +MAX_EMIN = rmpdec.MPD_MAX_EMIN +MAX_ETINY = rmpdec.MPD_MAX_ETINY + # DEC_MINALLOC >= MPD_MINALLOC DEC_MINALLOC = 4 @@ -306,7 +310,7 @@ w_result = W_Decimal.allocate(space) with context.catch_status(space) as (ctx, status_ptr): rmpdec.mpd_qcopy_sign(w_result.mpd, self.mpd, w_other.mpd, - ctx, status_ptr) + status_ptr) return w_result # Boolean functions diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -90,6 +90,9 @@ MPD_IEEE_CONTEXT_MAX_BITS = platform.ConstantInteger( 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') + MPD_MAX_EMAX = platform.ConstantInteger('MPD_MAX_EMAX') + MPD_MAX_EMIN = platform.ConstantInteger('MPD_MAX_EMIN') + MPD_MAX_ETINY = 
platform.ConstantInteger('MPD_MAX_ETINY') MPD_MAX_SIGNAL_LIST = platform.ConstantInteger('MPD_MAX_SIGNAL_LIST') MPD_SIZE_MAX = platform.ConstantInteger('MPD_SIZE_MAX') MPD_SSIZE_MAX = platform.ConstantInteger('MPD_SSIZE_MAX') @@ -334,7 +337,7 @@ mpd_qcopy_sign = external( 'mpd_qcopy_sign', - [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], + [MPD_PTR, MPD_PTR, MPD_PTR, rffi.UINTP], lltype.Void) mpd_qround_to_int = external( From noreply at buildbot.pypy.org Sat May 17 11:56:51 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 17 May 2014 11:56:51 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix include paths (why do we copy all .c into the temporary directory?) Message-ID: <20140517095651.B93341D237F@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71554:2e77fc3c5412 Date: 2014-05-17 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/2e77fc3c5412/ Log: Fix include paths (why do we copy all .c into the temporary directory?) 
diff --git a/rpython/translator/c/src/libmpdec/basearith.c b/rpython/translator/c/src/libmpdec/basearith.c --- a/rpython/translator/c/src/libmpdec/basearith.c +++ b/rpython/translator/c/src/libmpdec/basearith.c @@ -26,15 +26,15 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include #include -#include "constants.h" -#include "memory.h" -#include "typearith.h" -#include "basearith.h" +#include "src/libmpdec/constants.h" +#include "src/libmpdec/memory.h" +#include "src/libmpdec/typearith.h" +#include "src/libmpdec/basearith.h" /*********************************************************************/ diff --git a/rpython/translator/c/src/libmpdec/constants.c b/rpython/translator/c/src/libmpdec/constants.c --- a/rpython/translator/c/src/libmpdec/constants.c +++ b/rpython/translator/c/src/libmpdec/constants.c @@ -26,9 +26,9 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include -#include "constants.h" +#include "src/libmpdec/constants.h" #if defined(CONFIG_64) diff --git a/rpython/translator/c/src/libmpdec/context.c b/rpython/translator/c/src/libmpdec/context.c --- a/rpython/translator/c/src/libmpdec/context.c +++ b/rpython/translator/c/src/libmpdec/context.c @@ -26,7 +26,7 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include diff --git a/rpython/translator/c/src/libmpdec/convolute.c b/rpython/translator/c/src/libmpdec/convolute.c --- a/rpython/translator/c/src/libmpdec/convolute.c +++ b/rpython/translator/c/src/libmpdec/convolute.c @@ -26,16 +26,16 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include -#include "bits.h" -#include "constants.h" -#include "fnt.h" -#include "fourstep.h" -#include "numbertheory.h" -#include "sixstep.h" -#include "umodarith.h" -#include "convolute.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/constants.h" +#include "src/libmpdec/fnt.h" +#include "src/libmpdec/fourstep.h" +#include "src/libmpdec/numbertheory.h" 
+#include "src/libmpdec/sixstep.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/convolute.h" /* Bignum: Fast convolution using the Number Theoretic Transform. Used for diff --git a/rpython/translator/c/src/libmpdec/crt.c b/rpython/translator/c/src/libmpdec/crt.c --- a/rpython/translator/c/src/libmpdec/crt.c +++ b/rpython/translator/c/src/libmpdec/crt.c @@ -26,12 +26,12 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include -#include "numbertheory.h" -#include "umodarith.h" -#include "crt.h" +#include "src/libmpdec/numbertheory.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/crt.h" /* Bignum: Chinese Remainder Theorem, extends the maximum transform length. */ diff --git a/rpython/translator/c/src/libmpdec/difradix2.c b/rpython/translator/c/src/libmpdec/difradix2.c --- a/rpython/translator/c/src/libmpdec/difradix2.c +++ b/rpython/translator/c/src/libmpdec/difradix2.c @@ -26,13 +26,13 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include -#include "bits.h" -#include "numbertheory.h" -#include "umodarith.h" -#include "difradix2.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/numbertheory.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/difradix2.h" /* Bignum: The actual transform routine (decimation in frequency). */ diff --git a/rpython/translator/c/src/libmpdec/fnt.c b/rpython/translator/c/src/libmpdec/fnt.c --- a/rpython/translator/c/src/libmpdec/fnt.c +++ b/rpython/translator/c/src/libmpdec/fnt.c @@ -26,14 +26,14 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include -#include "bits.h" -#include "difradix2.h" -#include "numbertheory.h" -#include "fnt.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/difradix2.h" +#include "src/libmpdec/numbertheory.h" +#include "src/libmpdec/fnt.h" /* Bignum: Fast transform for medium-sized coefficients. 
*/ diff --git a/rpython/translator/c/src/libmpdec/fourstep.c b/rpython/translator/c/src/libmpdec/fourstep.c --- a/rpython/translator/c/src/libmpdec/fourstep.c +++ b/rpython/translator/c/src/libmpdec/fourstep.c @@ -26,13 +26,13 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include -#include "numbertheory.h" -#include "sixstep.h" -#include "transpose.h" -#include "umodarith.h" -#include "fourstep.h" +#include "src/libmpdec/numbertheory.h" +#include "src/libmpdec/sixstep.h" +#include "src/libmpdec/transpose.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/fourstep.h" /* Bignum: Cache efficient Matrix Fourier Transform for arrays of the diff --git a/rpython/translator/c/src/libmpdec/io.c b/rpython/translator/c/src/libmpdec/io.c --- a/rpython/translator/c/src/libmpdec/io.c +++ b/rpython/translator/c/src/libmpdec/io.c @@ -26,7 +26,7 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include @@ -35,11 +35,11 @@ #include #include #include -#include "bits.h" -#include "constants.h" -#include "memory.h" -#include "typearith.h" -#include "io.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/constants.h" +#include "src/libmpdec/memory.h" +#include "src/libmpdec/typearith.h" +#include "src/libmpdec/io.h" /* This file contains functions for decimal <-> string conversions, including diff --git a/rpython/translator/c/src/libmpdec/memory.c b/rpython/translator/c/src/libmpdec/memory.c --- a/rpython/translator/c/src/libmpdec/memory.c +++ b/rpython/translator/c/src/libmpdec/memory.c @@ -26,11 +26,11 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include -#include "typearith.h" -#include "memory.h" +#include "src/libmpdec/typearith.h" +#include "src/libmpdec/memory.h" /* Guaranteed minimum allocation for a coefficient. 
May be changed once diff --git a/rpython/translator/c/src/libmpdec/mpdecimal.c b/rpython/translator/c/src/libmpdec/mpdecimal.c --- a/rpython/translator/c/src/libmpdec/mpdecimal.c +++ b/rpython/translator/c/src/libmpdec/mpdecimal.c @@ -26,19 +26,19 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include #include #include -#include "basearith.h" -#include "bits.h" -#include "convolute.h" -#include "crt.h" -#include "memory.h" -#include "typearith.h" -#include "umodarith.h" +#include "src/libmpdec/basearith.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/convolute.h" +#include "src/libmpdec/crt.h" +#include "src/libmpdec/memory.h" +#include "src/libmpdec/typearith.h" +#include "src/libmpdec/umodarith.h" #ifdef PPRO #if defined(_MSC_VER) diff --git a/rpython/translator/c/src/libmpdec/numbertheory.c b/rpython/translator/c/src/libmpdec/numbertheory.c --- a/rpython/translator/c/src/libmpdec/numbertheory.c +++ b/rpython/translator/c/src/libmpdec/numbertheory.c @@ -26,12 +26,12 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include -#include "bits.h" -#include "umodarith.h" -#include "numbertheory.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/numbertheory.h" /* Bignum: Initialize the Number Theoretic Transform. 
*/ diff --git a/rpython/translator/c/src/libmpdec/sixstep.c b/rpython/translator/c/src/libmpdec/sixstep.c --- a/rpython/translator/c/src/libmpdec/sixstep.c +++ b/rpython/translator/c/src/libmpdec/sixstep.c @@ -26,16 +26,16 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include -#include "bits.h" -#include "difradix2.h" -#include "numbertheory.h" -#include "transpose.h" -#include "umodarith.h" -#include "sixstep.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/difradix2.h" +#include "src/libmpdec/numbertheory.h" +#include "src/libmpdec/transpose.h" +#include "src/libmpdec/umodarith.h" +#include "src/libmpdec/sixstep.h" /* Bignum: Cache efficient Matrix Fourier Transform for arrays of the diff --git a/rpython/translator/c/src/libmpdec/transpose.c b/rpython/translator/c/src/libmpdec/transpose.c --- a/rpython/translator/c/src/libmpdec/transpose.c +++ b/rpython/translator/c/src/libmpdec/transpose.c @@ -26,16 +26,16 @@ */ -#include "mpdecimal.h" +#include "src/libmpdec/mpdecimal.h" #include #include #include #include #include -#include "bits.h" -#include "constants.h" -#include "typearith.h" -#include "transpose.h" +#include "src/libmpdec/bits.h" +#include "src/libmpdec/constants.h" +#include "src/libmpdec/typearith.h" +#include "src/libmpdec/transpose.h" #define BUFSIZE 4096 From noreply at buildbot.pypy.org Sat May 17 16:04:03 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 16:04:03 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: improve dispatching of HLOperation.consider() Message-ID: <20140517140403.C8D8A1D2CEE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71555:d2264acf455f Date: 2014-05-17 01:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d2264acf455f/ Log: improve dispatching of HLOperation.consider() diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py 
@@ -89,8 +89,10 @@ def constfold(self): return None - def consider(self, annotator, *argcells): - raise NotImplementedError + def consider(self, annotator, *args): + args_s = [arg.ann for arg in args] + spec = type(self).get_specialization(*args_s) + return spec(*args) class PureOperation(HLOperation): pure = True @@ -137,18 +139,23 @@ class SingleDispatchMixin(object): dispatch = 1 - def consider(self, annotator, arg, *other_args): - impl = getattr(arg.ann, self.opname) - s_others = [x.ann for x in other_args] - return impl(*s_others) + @classmethod + def get_specialization(cls, s_arg, *_ignored): + impl = getattr(s_arg, cls.opname) + def specialized(arg, *other_args): + return impl(*[x.ann for x in other_args]) + return specialized + class DoubleDispatchMixin(object): dispatch = 2 - def consider(self, annotator, arg1, arg2, *other_args): - impl = getattr(pair(arg1.ann, arg2.ann), self.opname) - s_others = [arg.ann for arg in other_args] - return impl(*s_others) + @classmethod + def get_specialization(cls, s_arg1, s_arg2, *_ignored): + impl = getattr(pair(s_arg1, s_arg2), cls.opname) + def specialized(arg1, arg2, *other_args): + return impl(*[x.ann for x in other_args]) + return specialized def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): @@ -372,8 +379,12 @@ pyfunc = staticmethod(operator.contains) # XXX "contains" clash with SomeObject method - def consider(self, annotator, seq, elem): - return seq.ann.op_contains(elem.ann) + @classmethod + def get_specialization(cls, s_seq, s_elem): + impl = s_seq.op_contains + def specialized(seq, elem): + return impl(elem.ann) + return specialized class NewDict(HLOperation): From noreply at buildbot.pypy.org Sat May 17 16:04:04 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 16:04:04 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: allow registration of specialized annotators for unary operations using AnnotatedValues Message-ID: 
<20140517140404.E73741D2CEE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71556:fadabd4a7942 Date: 2014-05-17 03:00 +0100 http://bitbucket.org/pypy/pypy/changeset/fadabd4a7942/ Log: allow registration of specialized annotators for unary operations using AnnotatedValues diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -13,7 +13,7 @@ from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc -from rpython.annotator.model import SomeTuple +from rpython.annotator.model import SomeTuple, AnnotatorError from rpython.flowspace.specialcase import SPECIAL_CASES @@ -53,6 +53,13 @@ type.__init__(cls, name, bases, attrdict) if hasattr(cls, 'opname'): setattr(op, cls.opname, cls) + cls._registry = {} + + def register(cls, Some_cls): + def decorator(func): + cls._registry[Some_cls] = func + return decorator + class HLOperation(SpaceOperation): __metaclass__ = HLOperationMeta @@ -140,11 +147,23 @@ dispatch = 1 @classmethod + def _dispatch(cls, Some_cls): + for c in Some_cls.__mro__: + try: + return cls._registry[c] + except KeyError: + pass + raise AnnotatorError("Unknown operation") + + @classmethod def get_specialization(cls, s_arg, *_ignored): - impl = getattr(s_arg, cls.opname) - def specialized(arg, *other_args): - return impl(*[x.ann for x in other_args]) - return specialized + try: + impl = getattr(s_arg, cls.opname) + def specialized(arg, *other_args): + return impl(*[x.ann for x in other_args]) + return specialized + except AttributeError: + return cls._dispatch(type(s_arg)) class DoubleDispatchMixin(object): From noreply at buildbot.pypy.org Sat May 17 16:04:06 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 16:04:06 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotator for op.type; kill one use of 
the _find_current_op() hack Message-ID: <20140517140406.266241D2CEE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71557:6ff2b5fd1e02 Date: 2014-05-17 03:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6ff2b5fd1e02/ Log: register annotator for op.type; kill one use of the _find_current_op() hack diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -19,18 +19,14 @@ UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) + at op.type.register(SomeObject) +def type(arg): + r = SomeType() + r.is_type_of = [arg.value] + return r class __extend__(SomeObject): - def type(self, *moreargs): - if moreargs: - raise Exception('type() called with more than one argument') - r = SomeType() - bk = getbookkeeper() - op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=self) - r.is_type_of = [op.args[0]] - return r - def issubtype(self, s_cls): if hasattr(self, 'is_type_of'): vars = self.is_type_of From noreply at buildbot.pypy.org Sat May 17 16:04:07 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 16:04:07 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotator for op.bool; kill another use of the _find_current_op() hack Message-ID: <20140517140407.5745F1D2CEE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71558:f0b04e182370 Date: 2014-05-17 05:57 +0100 http://bitbucket.org/pypy/pypy/changeset/f0b04e182370/ Log: register annotator for op.bool; kill another use of the _find_current_op() hack diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -20,11 +20,23 @@ if oper.dispatch == 1]) @op.type.register(SomeObject) -def type(arg): +def type_SomeObject(arg): r = SomeType() r.is_type_of = [arg.value] return r + at op.bool.register(SomeObject) +def 
bool_SomeObject(obj): + r = SomeBool() + obj.ann.bool_behavior(r) + s_nonnone_obj = obj.ann + if s_nonnone_obj.can_be_none(): + s_nonnone_obj = s_nonnone_obj.nonnoneify() + knowntypedata = {} + add_knowntypedata(knowntypedata, True, [obj.value], s_nonnone_obj) + r.set_knowntypedata(knowntypedata) + return r + class __extend__(SomeObject): def issubtype(self, s_cls): @@ -48,21 +60,6 @@ if s_len.is_immutable_constant(): s.const = s_len.const > 0 - def bool(s_obj): - r = SomeBool() - s_obj.bool_behavior(r) - - bk = getbookkeeper() - knowntypedata = {} - op = bk._find_current_op(opname="bool", arity=1) - arg = op.args[0] - s_nonnone_obj = s_obj - if s_obj.can_be_none(): - s_nonnone_obj = s_obj.nonnoneify() - add_knowntypedata(knowntypedata, True, [arg], s_nonnone_obj) - r.set_knowntypedata(knowntypedata) - return r - def hash(self): raise AnnotatorError("cannot use hash() in RPython") From noreply at buildbot.pypy.org Sat May 17 20:22:13 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 20:22:13 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: put AnnotatedValues in annotator.bindings instead of Somes Message-ID: <20140517182213.321351C31AD@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71559:43c0d2e098d4 Date: 2014-05-17 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/43c0d2e098d4/ Log: put AnnotatedValues in annotator.bindings instead of Somes diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -33,7 +33,7 @@ translator.annotator = self self.translator = translator self.pendingblocks = {} # map {block: graph-containing-it} - self.bindings = {} # map Variables to SomeValues + self.bindings = {} # map Variables to AnnotatedValues self.annotated = {} # set of blocks already seen self.added_blocks = None # see processblock() below self.links_followed = {} # set of links that have ever been followed @@ 
-153,7 +153,7 @@ elif isinstance(variable, Variable): cell = self.bindings.get(variable) if cell: - return cell.knowntype + return cell.ann.knowntype else: return object else: @@ -230,7 +230,7 @@ "Gives the SomeValue corresponding to the given Variable or Constant." if isinstance(arg, Variable): try: - return self.bindings[arg] + return self.bindings[arg].ann except KeyError: if default is not FAIL: return default @@ -242,19 +242,25 @@ raise TypeError('Variable or Constant expected, got %r' % (arg,)) def annvalue(self, arg): - return AnnotatedValue(arg, self.binding(arg)) + if isinstance(arg, Variable): + return self.bindings[arg] + else: + return AnnotatedValue(arg, self.bookkeeper.immutablevalue(arg.value)) def typeannotation(self, t): return signature.annotation(t, self.bookkeeper) def setbinding(self, arg, s_value): if arg in self.bindings: - assert s_value.contains(self.bindings[arg]) - self.bindings[arg] = s_value + assert s_value.contains(self.bindings[arg].ann) + self.bindings[arg].ann = s_value + else: + self.bindings[arg] = AnnotatedValue(arg, s_value) def transfer_binding(self, v_target, v_source): assert v_source in self.bindings - self.bindings[v_target] = self.bindings[v_source] + self.bindings[v_target] = AnnotatedValue(v_target, + self.bindings[v_source].ann) def warning(self, msg, pos=None): if pos is None: @@ -293,7 +299,7 @@ # get the (current) return value v = graph.getreturnvar() try: - return self.bindings[v] + return self.binding(v) except KeyError: # the function didn't reach any return statement so far. 
# (some functions actually never do, they always raise exceptions) @@ -446,7 +452,7 @@ # is known exits = block.exits if isinstance(block.exitswitch, Variable): - s_exitswitch = self.bindings[block.exitswitch] + s_exitswitch = self.binding(block.exitswitch) if s_exitswitch.is_constant(): exits = [link for link in exits if link.exitcase == s_exitswitch.const] @@ -485,8 +491,10 @@ # mapping (exitcase, variable) -> s_annotation # that can be attached to booleans, exitswitches - knowntypedata = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", {}) + knowntypedata = {} + if isinstance(block.exitswitch, Variable): + knowntypedata = getattr(self.binding(block.exitswitch), + "knowntypedata", {}) for link in exits: self.follow_link(graph, link, knowntypedata) if block in self.notify: diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -857,7 +857,8 @@ s = a.build_types(snippet.harmonic, [int]) assert s.knowntype == float # check that the list produced by range() is not mutated or resized - for s_value in a.bindings.values(): + for value in a.bindings.values(): + s_value = value.ann if isinstance(s_value, annmodel.SomeList): assert not s_value.listdef.listitem.resized assert not s_value.listdef.listitem.mutated @@ -2751,8 +2752,8 @@ a = self.RPythonAnnotator() a.build_types(f, []) v1, v2 = graphof(a, readout).getargs() - assert not a.bindings[v1].is_constant() - assert not a.bindings[v2].is_constant() + assert not a.binding(v1).is_constant() + assert not a.binding(v2).is_constant() def test_prebuilt_mutables_dont_use_eq(self): # test that __eq__ is not called during annotation, at least diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -19,7 +19,7 @@ def sigof(a, f): # returns [param1, param2, 
..., ret] g = graphof(a.translator, f) - return [a.bindings[v] for v in g.startblock.inputargs] + [a.bindings[g.getreturnvar()]] + return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())] def getsig(f, policy=None): a = annotate_at(f, policy=policy) diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -106,7 +106,7 @@ def format_simple_call(annotator, oper, msg): msg.append("Occurred processing the following simple_call:") try: - descs = annotator.bindings[oper.args[0]].descriptions + descs = annotator.binding(oper.args[0]).descriptions except (KeyError, AttributeError), e: msg.append(" (%s getting at the binding!)" % ( e.__class__.__name__,)) diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -107,7 +107,8 @@ # make the dictionary of links -- one per annotated variable self.current_value = {} if self.annotator: - for var, s_value in self.annotator.bindings.items(): + for var, value in self.annotator.bindings.items(): + s_value = value.ann info = '%s: %s' % (var.name, s_value) annotationcolor = getattr(s_value, 'annotationcolor', None) self.links[var.name] = info, annotationcolor diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -178,8 +178,7 @@ assert 0 <= n < len(block.operations) # chop off the unreachable end of the block del block.operations[n+1:] - s_impossible = annmodel.SomeImpossibleValue() - self.bindings[block.operations[n].result] = s_impossible + self.setbinding(block.operations[n].result, annmodel.s_ImpossibleValue) # insert the equivalent of 'raise AssertionError' graph = self.annotated[block] msg = "Call to %r should have raised an exception" % (getattr(graph, 'func', None),) From noreply at buildbot.pypy.org Sat May 17 
22:51:13 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 17 May 2014 22:51:13 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: put AnnotatedValues in .is_type_of Message-ID: <20140517205113.46EB81D2D44@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71560:1b64ec913ad6 Date: 2014-05-17 21:50 +0100 http://bitbucket.org/pypy/pypy/changeset/1b64ec913ad6/ Log: put AnnotatedValues in .is_type_of diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -243,7 +243,12 @@ def annvalue(self, arg): if isinstance(arg, Variable): - return self.bindings[arg] + try: + return self.bindings[arg] + except KeyError: + value = AnnotatedValue(arg, None) + self.bindings[arg] = value + return value else: return AnnotatedValue(arg, self.bookkeeper.immutablevalue(arg.value)) @@ -252,8 +257,10 @@ def setbinding(self, arg, s_value): if arg in self.bindings: - assert s_value.contains(self.bindings[arg].ann) - self.bindings[arg].ann = s_value + value = self.bindings[arg] + if value.ann is not None: + assert s_value.contains(value.ann) + value.ann = s_value else: self.bindings[arg] = AnnotatedValue(arg, s_value) @@ -517,13 +524,13 @@ last_exception_object = annmodel.SomeType() if isinstance(last_exception_var, Constant): last_exception_object.const = last_exception_var.value - last_exception_object.is_type_of = [last_exc_value_var] + last_exception_object.is_type_of = [ + self.annvalue(last_exc_value_var)] if isinstance(last_exception_var, Variable): self.setbinding(last_exception_var, last_exception_object) if isinstance(last_exc_value_var, Variable): self.setbinding(last_exc_value_var, last_exc_value_object) - last_exception_object = annmodel.SomeType() if isinstance(last_exception_var, Constant): last_exception_object.const = last_exception_var.value @@ -545,7 +552,7 @@ elif a == last_exc_value_var: assert in_except_block 
cells.append(last_exc_value_object) - last_exc_value_vars.append(v) + last_exc_value_vars.append(self.annvalue(v)) else: cell = self.binding(a) if (link.exitcase, a) in knowntypedata: @@ -558,8 +565,8 @@ if hasattr(cell,'is_type_of'): renamed_is_type_of = [] for v in cell.is_type_of: - new_vs = renaming.get(v,[]) - renamed_is_type_of += new_vs + new_vs = renaming.get(v.value, []) + renamed_is_type_of += map(self.annvalue, new_vs) assert cell.knowntype is type newcell = annmodel.SomeType() if cell.is_constant(): diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -115,8 +115,10 @@ def bind(src_obj, tgt_obj, tgt_arg): if hasattr(tgt_obj, 'is_type_of') and src_obj.is_constant(): - add_knowntypedata(knowntypedata, True, tgt_obj.is_type_of, - bk.valueoftype(src_obj.const)) + add_knowntypedata( + knowntypedata, True, + [inst.value for inst in tgt_obj.is_type_of], + bk.valueoftype(src_obj.const)) assert annotator.binding(op.args[tgt_arg]) == tgt_obj add_knowntypedata(knowntypedata, True, [op.args[tgt_arg]], src_obj) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1399,7 +1399,7 @@ et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() t.const = KeyError - t.is_type_of = [ev] + t.is_type_of = [a.annvalue(ev)] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) @@ -1414,7 +1414,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [ev] + t.is_type_of = [a.annvalue(ev)] t.const = KeyError # IndexError ignored because 'dic' is a dict assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) 
@@ -1449,7 +1449,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [ev] + t.is_type_of = [a.annvalue(ev)] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) @@ -1471,7 +1471,7 @@ fg = graphof(a, f) et, ev = fg.exceptblock.inputargs t = annmodel.SomeType() - t.is_type_of = [ev] + t.is_type_of = [a.annvalue(ev)] assert a.binding(et) == t assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -22,7 +22,7 @@ @op.type.register(SomeObject) def type_SomeObject(arg): r = SomeType() - r.is_type_of = [arg.value] + r.is_type_of = [arg] return r @op.bool.register(SomeObject) @@ -41,10 +41,10 @@ def issubtype(self, s_cls): if hasattr(self, 'is_type_of'): - vars = self.is_type_of + instances = self.is_type_of annotator = getbookkeeper().annotator - return builtin.builtin_isinstance(annotator.binding(vars[0]), - s_cls, vars) + return builtin.builtin_isinstance(instances[0].ann, s_cls, + [x.value for x in instances]) if self.is_constant() and s_cls.is_constant(): return immutablevalue(issubclass(self.const, s_cls.const)) return s_Bool diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -48,11 +48,12 @@ s_ev = annotator.binding(ev, None) if s_et: if s_et.knowntype == type: - if s_et.__class__ == annmodel.SomeType: - if hasattr(s_et, 'is_type_of') and s_et.is_type_of == [ev]: + if isinstance(s_et, annmodel.SomeType): + if (hasattr(s_et, 'is_type_of') and + s_et.is_type_of == [annotator.annvalue(ev)]): continue else: - if s_et.__class__ == annmodel.SomePBC: + if isinstance(s_et, annmodel.SomePBC): continue yield 
"%s exceptblock is not completely sane" % graph.name diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -191,7 +191,7 @@ # fix the annotation of the exceptblock.inputargs etype, evalue = graph.exceptblock.inputargs s_type = annmodel.SomeType() - s_type.is_type_of = [evalue] + s_type.is_type_of = [self.annvalue(evalue)] s_value = annmodel.SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) self.setbinding(etype, s_type) self.setbinding(evalue, s_value) From noreply at buildbot.pypy.org Sun May 18 00:28:41 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Sun, 18 May 2014 00:28:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: STMFork primtive works. The image introduces in 832 was not related to previous Message-ID: <20140517222841.1ABEA1C3306@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r833:3ec34c19651b Date: 2014-05-18 00:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3ec34c19651b/ Log: STMFork primtive works. The image introduces in 832 was not related to previous images. diff too long, truncating to 2000 out of 363031 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -1,39 +1,443 @@ -'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! ----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! 
I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! 
!WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. 
See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) 
"The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! !HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). 
The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. ^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! 
asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. 
self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). 
"the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! 
testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. 
self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." 
"Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." "Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. 
See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! 
A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. 
window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! 
!SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList: aSymbol currentPackageList := aSymbol. self changed: #installButtonLabel.! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'! defaultLabel ^ 'Categorical ' , super defaultLabel! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'! installButtonLabel ^ self currentPackageList = #notInstalled ifTrue: ['Install the above package'] ifFalse: ['Remove the above package']! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'! installedPackageList ^self packageList select: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! 
installedPackagesListIndex ^ self currentPackageList = #installed ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! installedPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #installed ifTrue: [self currentPackageList: #installed. self changed: #currentPackageList]. self noteChanged! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! isOn ^false! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'! notInstalledPackageList ^self packageList reject: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! notInstalledPackagesListIndex ^ self currentPackageList = #notInstalled ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'! notInstalledPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #notInstalled ifTrue: [self currentPackageList: #notInstalled. self changed: #currentPackageList]. self changed: #packagesListIndex. "update my selection" self noteChanged. self contentsChanged! ! !SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'! noteChanged self changed: #installedPackageList. self changed: #notInstalledPackageList. super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'! packageList ^ self packages select: [:e | (e categories anySatisfy: [:cat | cat = self selectedCategory]) and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! ! !SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'! descriptionForPartsBin ^self partName: 'Package Loader' categories: #(Tools) documentation: 'SqueakMap UI' ! ! 
!SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifPresent: [:tb | self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white "not set" ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! initializedInstance ^ (ToolBuilder open: self new) extent: 400 at 400! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! new "Create a SqueakMap loader on the default map." ^self newOn: SMSqueakMap default! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! newOn: aMap "Create a SqueakMap loader on given map." ^super new on: aMap; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! newStandAlone ^ ToolBuilder open: self new! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'! open "Create and open a SqueakMap Loader." "SMLoaderPlus open" ^ (Smalltalk at: #ToolBuilder) open: self new! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'! openOn: aSqueakMap "Create and open a SqueakMap Loader on a given map." 
"self openOn: SqueakMap default" ^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'! prototypicalToolWindow ^ ToolBuilder open: self new; applyModelExtent; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'! registerInFlapsRegistry "Register the receiver in the system's flaps registry." self environment at: #Flaps ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:) ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'! windowColorSpecification "Answer a WindowColorSpec object that declares my preference." ^WindowColorSpec classSymbol: self name wording: 'Package Loader' brightColor: Color yellow muchLighter duller pastelColor: Color yellow veryMuchLighter duller helpMessage: 'The SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! addSelectedCategoryAsFilter "Add a new filter that filters on the currently selected category. 
Make it enabled as default." categoriesToFilterIds add: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'! askToLoadUpdates "Check how old the map is and ask to update it if it is older than 10 days or if there is no map on disk." | available | available := map isCheckpointAvailable. (available not or: [ (Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3]) ifTrue: [ (self confirm: (available ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?'] ifFalse: ['There is no map on disk, fetch it from the Internet?'])) ifTrue: [self loadUpdates]]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. dir := item isPackage ifTrue: [map cache directoryForPackage: item] ifFalse: [map cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'." win openInWorld! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildButtonBarWith: aBuilder ^ aBuilder pluggablePanelSpec new model: self; layout: #horizontal; children: (self commandSpecs select: [ :spec | spec fourth includes: #all] thenCollect: [ :spec | aBuilder pluggableActionButtonSpec new model: self; label: spec first; action: spec second; help: spec third; enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]); yourself]); name: #buttonBar; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. 
btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildCategoriesListWith: aBuilder "Create the hierarchical list holding the category tree." ^ aBuilder pluggableTreeSpec new model: self; roots: #categoryList; getSelectedPath: #selectedCategoryPath; getChildren: #categoryChildren:; hasChildren: #categoryHasChildren:; setSelected: #selectedCategory:; menu: #categoriesMenu:; label: #categoryLabel:; autoDeselect: true; wantsDrop: true; name: #categoriesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagePaneWith: aBuilder "Create the text area to the right in the loader." ^ aBuilder pluggableTextSpec new model: self; getText: #itemDescription; name: #packagePane; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagesListWith: aBuilder "Create the hierarchical list holding the packages and releases." ^ aBuilder pluggableTreeSpec new model: self; roots: #packageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; name: #packagesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildSearchPaneWith: aBuilder ^ aBuilder pluggableInputFieldSpec new model: self; selection: #searchSelection; getText: #searchText; setText: #findPackage:notifying:; name: #search; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'! buildWith: aBuilder "Create the package loader window." | buttonBarHeight vertDivide horizDivide | buttonBarHeight := 0.07. vertDivide := 0.6. horizDivide := 0.3. builder := aBuilder. 
window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight)); add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2))); add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide)); add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1)); add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1)); yourself); yourself). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! cachePackageReleaseAndOfferToCopy "Cache package release, then offer to copy it somewhere. Answer the chosen file's location after copy, or the cache location if no directory was chosen." | release installer newDir newName newFile oldFile oldName | release := self selectedPackageOrRelease. release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. installer := SMInstaller forPackageRelease: release. [UIManager default informUser: 'Caching ' , release asString during: [installer cache]] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs. ^nil ]. installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil]. oldName := installer fullFileName. newDir := FileList2 modalFolderSelector: installer directory. newDir ifNil: [ ^oldName ]. newDir = installer directory ifTrue: [ ^oldName ]. newName := newDir fullNameFor: installer fileName. newFile := FileStream newFileNamed: newName. newFile ifNil: [ ^oldName ]. newFile binary. 
oldFile := FileStream readOnlyFileNamed: oldName. oldFile ifNil: [ ^nil ]. oldFile binary. [[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]] on: Error do: [ :ex | ^oldName ]. ^newName! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categoriesMenu: aMenu "Answer the categories-list menu." self selectedCategory ifNotNil: [aMenu addList: self categorySpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! categoryChildren: aCategory ^ aCategory subCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! categoryHasChildren: aCategory ^ aCategory hasSubCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:46'! categoryLabel: aCategory ^ aCategory name! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01'! categoryList "Create the category list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (map categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! 
!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/22/2006 15:02'!
changeFilters: anObject
	"Install a new filter collection and re-select the previously selected item if it is still visible."
	| oldItem index |
	oldItem := self selectedPackageOrRelease.
	filters := anObject.
	self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]).
	self noteChanged! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:01'!
commandSpecFor: selector
	"Answer the command spec (label, selector, balloon help, applicability) for the given action selector."
	^ self commandSpecs detect: [:spec | spec second = selector]! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:00'!
commandSpecs
	"Answer the command specifications: button label, action selector, balloon help, and where the command applies (item and/or all)."
	^ #(('Install' installPackageRelease 'Install the latest version from the server.' (item all))
	('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.' (item all))
	('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.' (item all))
	('Copy from cache' cachePackageReleaseAndOfferToCopy 'Download selected release into cache first if needed, and then offer to copy it somewhere else.' (item))
	('Force download into cache' downloadPackageRelease 'Force a download of the selected release into the cache.' (item))
	('Update' loadUpdates 'Update the package index from the servers.' (all))
	('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).' (all))
	('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm '' (item))
	('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm '' (item))
	('Copy list' listInPasteBuffer 'Puts the list as text into the clipboard.' (all))
	('Save filters' saveFiltersAsDefault 'Saves the current filters as default.' (all))
	('Help' help 'What is this?' (all)))! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'!
defaultButtonPaneHeight
	"Answer the user's preferred default height for new button panes."
	^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
defaultLabel
	"Base window label; labelForShown: appends the package counts."
	^ 'SqueakMap Package Loader'! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'!
downloadPackageRelease
	"Force a download of the selected package release into the cache."
	| release |
	release := self selectedPackageOrRelease.
	release isPackageRelease ifFalse: [ self error: 'Should be a package release!!'].
	[UIManager default informUser: 'Downloading ' , release asString during: [
		(SMInstaller forPackageRelease: release) download]
	] on: Error do: [:ex |
		| msg |
		msg := ex messageText ifNil: [ex asString].
		self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
emailPackageMaintainers
	"Send mail to package owner and co-maintainers."
	| item package toAddresses |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	package := item isPackageRelease ifTrue: [item package] ifFalse: [item].
	"(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)"
	toAddresses := '<', package owner email, '>'.
	package maintainers ifNotNil: [
		package maintainers do: [:maintainer |
			toAddresses := toAddresses, ', <', maintainer email, '>']].
	SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filterAdd: anObject
	"Enable one more filter and refresh the list."
	self changeFilters: (self filters copyWith: anObject)! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterAutoInstall
	^[:package | package isInstallable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'!
filterAvailable
	^[:package | package isAvailable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterInstalled
	^[:package | package isInstalled]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterNotInstalledYet
	^[:package | package isInstalled not]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'!
filterNotUptoDate
	^[:package | package isAvailable]! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'!
filterPublished
	^[:package | package isPublished]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filterRemove: anObject
	"Disable a filter and refresh the list."
	self changeFilters: (self filters copyWithout: anObject)! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'!
filterSafelyAvailable
	^[:package | package isSafelyAvailable]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/30/2006 21:07'!
filterSpecs
	"Return a specification for the filter menu. Is called each time."
	| specs |
	specs := #(#('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically')
		#('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.')
		#('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.')
		#('Installed packages' #filterInstalled 'Display only packages that are installed.')
		#('Published packages' #filterPublished 'Display only packages that have at least one published release.')
	) asOrderedCollection.
	categoriesToFilterIds do: [:catId |
		specs add: {'Packages in ' , (map object: catId) name. catId. 'Display only packages that are in the category.'}].
	^ specs! !

!SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'!
filterVersion
	"Ignore spaces in the version string, they're sometimes spurious. Not used anymore."
	^[:package | package categories anySatisfy: [:cat |
		(cat name, '*') match: (Smalltalk version copyWithout: $ ) ]]! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
filters
	^filters! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/24/2006 13:49'!
findPackage: aString notifying: aView
	"Search and select a package with the given (sub) string in the name or description. "
	| index list match descriptions |
	match := aString asString asLowercase.
	index := self packagesListIndex.
	list := self packageNameList.
	list isEmpty ifTrue: [^ self].
	descriptions := self packageList collect: [:e | e description].
	index + 1 to: list size do: [:i |
		(((list at: i) includesSubstring: match caseSensitive: false)
			or: [(descriptions at: i) includesSubstring: match caseSensitive: false])
			ifTrue: [^ self packagesListIndex: i]].
	"wrap around"
	1 to: index do: [:i |
		(((list at: i) includesSubstring: match caseSensitive: false)
			or: [(descriptions at: i) includesSubstring: match caseSensitive: false])
			ifTrue: [^ self packagesListIndex: i]].
	self inform: 'No package matching ' , aString asString! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
generalOptions
	"Menu entries that apply regardless of the current selection; #- is a separator."
	^#( #('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm)
	#('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm)
	#('Put list in paste buffer' listInPasteBuffer)
	#('Save filters as default' saveFiltersAsDefault)
	#- )! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 18:36'!
hasSelectedItem
	^ self selectedPackageOrRelease notNil! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'!
help
	"Present help text. If there is a web server available, offer to open it. Use the WebBrowser registry if possible, or Scamper if available."
	| message browserClass |
	message := 'Welcome to the SqueakMap package loader. The names of packages are followed by versions: (installed -> latest). If there is no arrow, your installed version of the package is the latest. Bold packages and releases have been installed. The checkbox menu items modify which packages you''ll see. Take a look at them - only some packages are shown initially. The options available for a package depend on how it was packaged. Comment on a package by emailing the author or the squeak list.'.
	browserClass := Smalltalk at: #WebBrowser ifPresent: [ :registry | registry default ].
	browserClass := browserClass ifNil: [ Smalltalk at: #Scamper ifAbsent: [ ^self inform: message ]].
	(self confirm: message, ' Would you like to view more detailed help on the SqueakMap swiki page?')
		ifTrue: [ browserClass openOnUrl: 'http://wiki.squeak.org/2726' asUrl]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'!
informException: ex msg: msg
	"Tell the user that an error has occurred. Offer to open debug notifier."
	(self confirm: msg, 'Would you like to open a debugger?') ifTrue: [ex pass]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 05:28'!
initialExtent
	"Default window size (the mail archive had mangled the @ send to ' at ')."
	^500@400! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
installPackageRelease
	"Install selected package or release. The cache is used."
	| item release |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	item isPackageRelease ifTrue: [
		(item isPublished or: [self confirm: 'Selected release is not published yet, install anyway?'])
			ifTrue: [^self installPackageRelease: item]]
	ifFalse: [
		release := item lastPublishedReleaseForCurrentSystemVersion.
		release ifNil: [
			(self confirm: 'The package has no published release for your Squeak version, try releases for any Squeak version?')
				ifTrue: [
					release := item lastPublishedRelease.
					release ifNil: [
						(self confirm: 'The package has no published release at all, take the latest of the unpublished releases?')
							ifTrue: [release := item lastRelease]]]].
		release ifNotNil: [^self installPackageRelease: release]]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:53'!
installPackageRelease: aRelease
	"Install a package release. The cache is used."
	| myRelease installer |
	aRelease isCompatibleWithCurrentSystemVersion ifFalse: [
		(self confirm: 'The package you are about to install is not listed as being compatible with your image version (', SystemVersion current majorMinorVersion, '), so the package may not work properly. Do you still want to proceed with the install?')
			ifFalse: [^ self]].
	myRelease := self installedReleaseOfMe.
	installer := SMInstaller forPackageRelease: aRelease.
	[UIManager default informUser: 'Downloading ' , aRelease asString during: [installer download].
	UIManager default informUser: 'Installing ' , aRelease asString during: [
		installer install.
		"if the loader itself was upgraded we must reopen, otherwise just refresh"
		myRelease = self installedReleaseOfMe
			ifFalse: [self reOpen]
			ifTrue: [self noteChanged]]
	] on: Error do: [:ex |
		| msg |
		msg := ex messageText ifNil:[ex asString].
		self informException: ex msg: ('Error occurred during install:\', msg, '\') withCRs].! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'!
installedReleaseOfMe
	"Return the release of the installed package loader."
	^SMSqueakMap default installedReleaseOf: (SMSqueakMap default packageWithId: '941c0108-4039-4071-9863-a8d7d2b3d4a3').! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'!
itemChildren: anItem
	"Tree protocol: a package's children are its releases; releases are leaves."
	^ anItem isPackage ifTrue: [anItem releases] ifFalse: [#()]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 19:56'!
itemDescription
	^ self selectedPackageOrRelease ifNil: [''] ifNotNilDo: [:item | item fullDescription]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'!
itemHasChildren: anItem
	^ anItem isPackage and: [anItem releases notEmpty]! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'!
itemLabel: anItem
	"Tree label: package name plus version label; installed items are shown bold."
	| label |
	label := anItem isPackage
		ifTrue: [anItem name , (anItem versionLabel ifEmpty: [''] ifNotEmptyDo: [:lbl | ' (' , anItem versionLabel , ')'])]
		ifFalse: [anItem smartVersion].
	^ anItem isInstalled ifTrue: [label asText allBold] ifFalse: [label]! !
!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/24/2006 17:17'!
label
	"Answer the window label for the currently shown (or full) package list."
	^ self labelForShown: (packagesList ifNil: [self packageList])! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
labelForFilter: aFilterSymbol
	"Answer the human-readable label of the given filter from filterSpecs."
	^(self filterSpecs detect: [:fs | fs second = aFilterSymbol]) first! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
labelForShown: packagesShown
	"Update the label of the window."
	^ self defaultLabel , ' (', (packagesShown size < map packages size ifTrue: [packagesShown size printString, ' shown out of '] ifFalse: ['']) , map packages size printString, ' packages)'! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
listInPasteBuffer
	"Useful when talking with people etc. Uses the map to produce a nice String."
	Clipboard clipboardText: (String streamContents: [:s |
		packagesList do: [:p | s nextPutAll: p nameWithVersionLabel; cr ]]) asText! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:31'!
loadUpdates
	"Refresh the package index from the servers, reporting any failure to the user."
	[UIManager default informUser: 'Loading Updates' during: [
		map loadUpdates.
		self noteChanged ]
	] on: Error do: [:ex |
		self informException: ex msg: ('Error occurred when updating map:\', ex messageText, '\') withCRs]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/24/2006 14:05'!
noteChanged
	"Invalidate cached lists and tell dependents to redraw; reopen if filters are gone."
	filters ifNil: [^ self reOpen].
	map ifNotNil: [packagesList := nil.
		selectedCategory := nil.
		self changed: #categoryList.
		self changed: #packageList.
		self changed: #packagesListIndex.
		"update my selection"
		self contentsChanged]! !

!SMLoaderPlus methodsFor: 'initialization' stamp: 'btr 11/22/2006 16:11'!
on: aSqueakMap
	"Initialize instance."
	map := aSqueakMap.
	map synchWithDisk.
	filters := DefaultFilters copy.
	categoriesToFilterIds := DefaultCategoriesToFilterIds copy.
	self askToLoadUpdates! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
package: aPackage filteredByCategory: aCategory
	"Answer true if the package should be shown if we filter on aCategory.
	It should be shown if itself or any of its releases has the category."
	| releases |
	releases := aPackage releases.
	^(aPackage hasCategoryOrSubCategoryOf: aCategory) or: [
		releases anySatisfy: [:rel |
			rel hasCategoryOrSubCategoryOf: aCategory]]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'!
packageList
	"Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any."
	| list |
	list := packagesList ifNil: [packagesList := self packageListCalculated].
	selectedCategory ifNotNil: [
		list := list select: [:each | self package: each filteredByCategory: selectedCategory]].
	self updateLabel: list.
	^ list! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'!
packageListCalculated
	"Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any."
	^ self packages select: [:p |
		filters allSatisfy: [:currFilter |
			currFilter isSymbol
				ifTrue: [(self perform: currFilter) value: p]
				ifFalse: [self package: p filteredByCategory: (map object: currFilter)]]]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
packageNameList
	^ self packageList collect: [:e | e name]! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:30'!
packageSpecificOptions
	"Answer the command specs applicable to the current selection."
	| choices packageOrRelease |
	packageOrRelease := self selectedPackageOrRelease.
	choices := OrderedCollection new.
	packageOrRelease isInstallable ifTrue: [
		choices add: (self commandSpecFor: #installPackageRelease)].
	(packageOrRelease isDownloadable and: [packageOrRelease isCached]) ifTrue: [
		choices add: (self commandSpecFor: #browseCacheDirectory)].
	(packageOrRelease isPackageRelease and: [packageOrRelease isDownloadable]) ifTrue: [
		choices add: (self commandSpecFor: #cachePackageReleaseAndOfferToCopy).
		choices add: (self commandSpecFor: #downloadPackageRelease)].
	choices add: (self commandSpecFor: #emailPackageMaintainers).
	^ choices! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 16:11'!
packages
	"We request the packages as sorted by name by default."
	^map packagesByName asArray! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'!
packagesListIndex
	^ self packageList indexOf: self selectedItem! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'!
packagesListIndex: anObject
	"Select by index; 0 clears the selection."
	self selectedItem: (anObject = 0 ifFalse: [self packageList at: anObject])! !

!SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'!
packagesMenu: aMenu
	"Answer the packages-list menu."
	self selectedPackageOrRelease ifNotNil: [aMenu addList: self packageSpecificOptions; addLine].
	aMenu addList: self generalOptions.
	self addFiltersToMenu: aMenu.
	^aMenu! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:45'!
perform: selector orSendTo: otherTarget
	"Selector was just chosen from a menu by a user. If can respond, then perform it on myself. If not, send it to otherTarget, presumably the editPane from which the menu was invoked."
	^ (self respondsTo: selector)
		ifTrue: [self perform: selector]
		ifFalse: [super perform: selector orSendTo: otherTarget]! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/26/2006 23:22'!
reOpen
	"Close this package loader, probably because it has been updated, and open a new one."
	self inform: 'This package loader has been upgraded and will be closed and reopened to avoid strange side effects.'.
	window delete.
	(Smalltalk at: self class name) open! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
removeCategoryFilters
	"Remove all category filters."
	categoriesToFilterIds := OrderedCollection new! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
removeSelectedCategoryAsFilter
	"Remove the filter that filters on the currently selected category."
	categoriesToFilterIds remove: self selectedCategory id! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
saveFiltersAsDefault
	"Save the current filters as default so that they are selected the next time the loader is opened."
	DefaultFilters := filters copy.
	DefaultCategoriesToFilterIds := categoriesToFilterIds copy! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'!
searchSelection
	"Selects all of the default search text so that a type-in overwrites it."
	^ {1. self searchText size}! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'!
searchText
	"A dummy default search text so that the field describes its purpose."
	^ 'Search packages'! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'!
selectedCategory
	"Return selected category."
	^ selectedCategory! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:37'!
selectedCategory: anSMCategory
	"Change the selected category."
	selectedCategory := anSMCategory.
	"drop the item selection if it is not in the new category"
	selectedCategory ifNotNil: [(selectedCategory objects includes: self selectedItem) ifFalse: [self selectedItem: nil]].
	self changed: #selectedCategory.
	self changed: #packageList! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:52'!
selectedCategoryPath
	"Return selected category's path."
	| path |
	path := #().
	selectedCategory ifNotNil: [
		selectedCategory parent ifNotNilDo: [:p | path := path copyWith: p].
		path := path copyWith: selectedCategory].
	^ path collect: [:cat | self categoryLabel: cat]! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'!
selectedItem
	^ selectedItem! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:27'!
selectedItem: anItem
	"This == workaround protects us from recursion since ToolBuilder's tree widgets will always tell us that the selection has been updated when we tell it that the selection path has been updated. Cleaner solutions invited."
	anItem == selectedItem ifFalse: [
		selectedItem := anItem.
		self changed: #selectedItemPath.
		self changed: #itemDescription.
		self changed: #hasSelectedItem]! !
!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:16'!
selectedItemPath
	"Answer the tree path to the selected item (package first for a release)."
	| path |
	path := #().
	(selectedItem isKindOf: SMPackageRelease) ifTrue: [path := path copyWith: selectedItem package].
	selectedItem ifNotNil: [path := path copyWith: selectedItem].
	^ path! !

!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:03'!
selectedPackageOrRelease
	"Return selected package or package release."
	^ selectedItem! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
showFilterString: aFilterSymbol
	"Menu label for a filter: its on/off state marker followed by its label."
	^(self stateForFilter: aFilterSymbol), (self labelForFilter: aFilterSymbol)! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
stateForFilter: aFilterSymbol
	"NOTE(review): both branches answer an empty string here - the on/off menu-state markers (angle-bracketed tokens) appear to have been stripped by the mail archiver; verify against the original changeset."
	^(self filters includes: aFilterSymbol) ifTrue: [''] ifFalse: [''] ! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
toggleFilterState: aFilterSymbol
	^(self filters includes: (aFilterSymbol))
		ifTrue: [self filterRemove: aFilterSymbol]
		ifFalse: [self filterAdd: aFilterSymbol]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
uncheckFilters
	"Uncheck all filters."
	filters := OrderedCollection new.
	self noteChanged! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
updateLabel: packagesShown
	"Update the label of the window."
	window ifNotNilDo: [:w | w setLabel: (self labelForShown: packagesShown)]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:29'!
upgradeInstalledPackages
	"Tries to upgrade all installed packages to the latest published release for this version of Squeak. So this is a conservative approach."
	| installed old myRelease toUpgrade info |
	installed := map installedPackages.
	old := map oldPackages.
	old isEmpty ifTrue: [
		^self inform: 'All ', installed size printString, ' installed packages are up to date.'].
	toUpgrade := map upgradeableAndOldPackages.
	toUpgrade isEmpty ifTrue: [
		^self inform: 'None of the ', old size printString, ' old packages of the ', installed size printString, ' installed can be automatically upgraded. You need to upgrade them manually.'].
	info := old size < toUpgrade size
		ifTrue: [
			'Of the ', old size printString, ' old packages only ', toUpgrade size printString, ' can be upgraded.
The following packages will not be upgraded:
', (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])]
		ifFalse: ['All old packages upgradeable.'].
	(self confirm: info, '
About to upgrade the following packages:
', (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]), 'Proceed?') ifTrue: [
		myRelease := self installedReleaseOfMe.
		[UIManager default informUser: 'Upgrading Installed Packages' during: [
			map upgradeOldPackages.
			self inform: toUpgrade size printString, ' packages successfully upgraded.'.
			"reopen if the loader itself was among the upgraded packages"
			myRelease = self installedReleaseOfMe
				ifFalse: [self reOpen]
				ifTrue: [self noteChanged]]
		] on: Error do: [:ex |
			self informException: ex msg: ('Error occurred when upgrading old packages:\', ex messageText, '\') withCRs]]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
upgradeInstalledPackagesConfirm
	"Tries to upgrade all installed packages to the latest published release for this version of Squeak. Confirms on each upgrade."
	^ self upgradeInstalledPackagesConfirm: true! !

!SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:29'!
upgradeInstalledPackagesConfirm: confirmEach
	"Tries to upgrade all installed packages to the latest published release for this version of Squeak. If confirmEach is true we ask for every upgrade. "
	| installed old myRelease toUpgrade info |
	installed := map installedPackages.
	old := map oldPackages.
	old isEmpty ifTrue: [^ self inform: 'All ' , installed size printString , ' installed packages are up to date.'].
	toUpgrade := map upgradeableAndOldPackages.
	toUpgrade isEmpty ifTrue: [^ self inform: 'None of the ' , old size printString , ' old packages of the ' , installed size printString , ' installed can be automatically upgraded. You need to upgrade them manually.'].
	info := old size < toUpgrade size
		ifTrue: ['Of the ' , old size printString , ' old packages only ' , toUpgrade size printString , ' can be upgraded.
The following packages will not be upgraded:
' , (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])]
		ifFalse: ['All old packages upgradeable.'].
	(self confirm: info , '
About to upgrade the following packages:
' , (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]) , 'Proceed?')
		ifTrue: [myRelease := self installedReleaseOfMe.
			[UIManager default informUser: 'Upgrading Installed Packages' during: [
				confirmEach
					ifTrue: [map upgradeOldPackagesConfirmBlock: [:p |
						self confirm: 'Upgrade ' , p installedRelease packageNameWithVersion , ' to ' , (p lastPublishedReleaseForCurrentSystemVersionNewerThan: p installedRelease) listName , '?']]
					ifFalse: [map upgradeOldPackages].
				self inform: toUpgrade size printString , ' packages successfully processed.'.
				myRelease = self installedReleaseOfMe
					ifTrue: [self noteChanged]
					ifFalse: [self reOpen]]]
				on: Error
				do: [:ex |
					self informException: ex msg: ('Error occurred when upgrading old packages:\' , ex messageText , '\') withCRs]]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
upgradeInstalledPackagesNoConfirm
	"Tries to upgrade all installed packages to the latest published release for this version of Squeak. No confirmation on each upgrade."
	^ self upgradeInstalledPackagesConfirm: false! !

!SMPackageWrapper methodsFor: 'comparing' stamp: 'dvf 9/21/2003 16:25' prior: 27998626!
= anObject
	"Wrappers are equal when the wrapped items are equal."
	^self withoutListWrapper = anObject withoutListWrapper! !

!SMPackageWrapper methodsFor: 'converting' stamp: 'btr 11/22/2006 00:54' prior: 27998778!
asString
	"List label: name plus version; installed packages are shown bold."
	| string |
	string := item name, ' (', item versionLabel, ')'.
	item isInstalled ifTrue: [string := string asText allBold].
	"(string includesSubString: '->') ifTrue: [string := string asText color: Color green]."
	^ string! !

!SMPackageWrapper methodsFor: 'accessing' stamp: 'dvf 10/14/2003 18:58' prior: 27998902!
contents
	"Children in the tree: the package's releases, newest first."
	^item releases reversed collect: [:e | SMPackageReleaseWrapper with: e]! !

!SMPackageWrapper methodsFor: 'testing' stamp: 'dvf 9/21/2003 16:25' prior: 27999070!
hash
	"Hash consistently with #= (both delegate to the wrapped item)."
	^self withoutListWrapper hash! !

!SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:55'!
help
	^ 'This shows all packages with their releases that should be displayed according the current filter.'! !

!SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'!
label
	^ self asString! !

!SMPackageWrapper methodsFor: 'printing' stamp: 'dvf 9/21/2003 16:22' prior: 27999192!
printOn: aStream
	aStream nextPutAll: 'wrapper for: ', item printString! !

!SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849043!
= anObject
	"Wrappers are equal when the wrapped items are equal."
	^self withoutListWrapper = anObject withoutListWrapper! !

!SMCategoryWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 18:53' prior: 27849195!
asString
	"List label: category name plus the number of contained objects."
	^ item name , ' (' , self numberOfObjects printString , ')'! !

!SMCategoryWrapper methodsFor: 'accessing' stamp: 'ar 2/9/2004 02:35' prior: 27849301!
category
	^item! !

!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 21:02' prior: 27849402!
contents
	"Children in the tree: one wrapper per subcategory."
	^ item subCategories collect: [:n | self class with: n model: n]! !

!SMCategoryWrapper methodsFor: 'model access' stamp: 'btr 11/30/2006 21:02'!
getList
	^ Array with: (self class with: self contents model: model)! !

!SMCategoryWrapper methodsFor: 'testing' stamp: 'btr 11/30/2006 18:53'!
hasContents
	^ item hasSubCategories! !

!SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849700!
hash
	"Hash consistently with #= (both delegate to the wrapped item)."
	^self withoutListWrapper hash! !
!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:56'!
help
	^ 'The categories are structured in a tree. Packages and package releases belong to several categories.
You can add one or more categories as filters and enable them in the menu.'! !

!SMCategoryWrapper methodsFor: 'accessing' stamp: 'BJP 11/22/2002 14:17'!
model
	^model! !

!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 18:53'!
numberOfObjects
	"Answer how many objects are directly in this category (subcategories not summed)."
"	| total |
	total _ 0.
	model allCategoriesDo: [:c |
		total _ total + c objects size].
	^total"
	^item objects size! !

!SMPackageReleaseWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 21:30' prior: 27997393!
asString
	"Show installed releases with a trailing asterisk."
	| string |
	string := item smartVersion.
	"Older SMBase versions don't have isInstalled.'"
	(item respondsTo: #isInstalled) ifTrue: [item isInstalled ifTrue: [string := (string , ' *') asText allBold]].
	^ string! !

!SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 17:14'!
contents
	"Releases are leaves in the tree."
	^ #()! !

!SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'!
label
	^ self asString ! !

!SMLoader class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47' prior: 27944626!
initialize
	"Hook us up in the world menu."
	"self initialize"
	Smalltalk at: #ToolBuilder ifAbsent: [
		"only register the pre-ToolBuilder loader when ToolBuilder is absent"
		self registerInFlapsRegistry.
		(Preferences windowColorFor: #SMLoader) = Color white
			ifTrue: ["not set"
				Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor)].
		(TheWorldMenu respondsTo: #registerOpenCommand:)
			ifTrue: [| oldCmds |
				oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader'].
				oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first].
				TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]].
	DefaultFilters := OrderedCollection new.
	DefaultCategoriesToFilterIds := OrderedCollection new! !
!SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52'!
openMenuString
	"World-menu entry label."
	^ 'SqueakMap Catalog'! !

!SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52' prior: 27945298!
unload
	"Undo what #initialize registered: world-menu command and flaps quads."
	(TheWorldMenu respondsTo: #registerOpenCommand:)
		ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].
	self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! !

!SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:08' prior: 54331069!
addFiltersToMenu: aMenu
	"Append one updating checkbox entry per filter spec, plus a 'clear all' entry."
	| filterSymbol help |
	self filterSpecs do: [:filterArray |
		filterSymbol := filterArray second.
		help := filterArray third.
		aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol).
		aMenu balloonTextForLastItem: help].
	aMenu addLine;
		addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages'))! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:15' prior: 27927912!
browseCacheDirectory
	"Open a FileList2 on the directory for the package or release."
	| item dir win |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	item ifNil: [^nil]. "redundant guard kept from the original"
	dir := item isPackage
		ifTrue: [model cache directoryForPackage: item]
		ifFalse: [model cache directoryForPackageRelease: item].
	win := FileList2 morphicViewOnDirectory: dir.
	" withLabel: item name, ' cache directory'."
	win openInWorld ! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:52'!
buildButtonBar
	"Build the row of command buttons from buttonSpecs.
	(The 3@0 spacer send had been mangled to '3 at 0' by the mail archiver.)"
	| aRow btn |
	aRow := AlignmentMorph newRow beSticky.
	aRow color: Color transparent;
		clipSubmorphs: true.
	self buttonSpecs do: [:spec |
		btn := self buildButtonNamed: spec first helpText: spec third action: spec second.
		aRow addMorphBack: btn]
		separatedBy: [aRow addTransparentSpacerOfSize: 3@0].
	^ aRow! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:27'!
buildButtonNamed: labelText helpText: balloon action: action
	"Answer one transparent command button for the button bar."
	| btn |
	btn := PluggableButtonMorph on: self getState: nil action: action.
	btn color: Color transparent;
		hResizing: #shrinkWrap;
		vResizing: #spaceFill;
		label: labelText;
		setBalloonText: balloon;
		onColor: Color transparent offColor: Color transparent.
	^ btn! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 19:04' prior: 27928394!
buildMorphicCategoriesList
	"Create the hierarchical list holding the category tree."
	| list |
	list := (SimpleHierarchicalListMorph
		on: self
		list: #categoryWrapperList
		selected: #selectedCategoryWrapper
		changeSelected: #selectedCategoryWrapper:
		menu: #categoriesMenu:
		keystroke: nil)
			autoDeselect: true;
			enableDrag: false;
			enableDrop: true;
			yourself.
	list setBalloonText: 'The categories are structured in a tree. Packages and package releases belong to several categories.
You can add one or more categories as filters and enable them in the menu.'.
	"list scroller submorphs do:[:each| list expandAll: each]."
	list adjustSubmorphPositions.
	^ list! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 00:22' prior: 27929139!
buildMorphicPackagesList
	"Create the hierarchical list holding the packages and releases."
	^(SimpleHierarchicalListMorph
		on: self
		list: #packageWrapperList
		selected: #selectedItemWrapper
		changeSelected: #selectedItemWrapper:
		menu: #packagesMenu:
		keystroke: nil)
			autoDeselect: false;
			enableDrag: false;
			enableDrop: true;
			setBalloonText: 'This shows all packages with their releases that should be displayed according the current filter.';
			yourself! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:13'!
buildPackageButtonBar
	"Answer an empty sticky row reserved for per-package buttons."
	| aRow |
	"Somewhat patterned after IRCe's buttonRow method."
	aRow := AlignmentMorph newRow beSticky.
	aRow color: Color transparent;
		clipSubmorphs: true.
	^ aRow! !

!SMLoader methodsFor: 'interface' stamp: 'gk 5/5/2006 02:05' prior: 27929686!
buildPackagePane
	"Create the text area to the right in the loader."
	| ptm |
	ptm := PluggableTextMorph
		on: self
		text: #contents
		accept: nil
		readSelection: nil "#packageSelection "
		menu: nil.
	ptm setBalloonText: 'This is where the selected package or package release is displayed.'.
	ptm lock.
	^ptm! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:08' prior: 27930070!
buildSearchPane
	"Cribbed from MessageNames>>inMorphicWindowWithInitialSearchString:"
	| typeInView searchButton typeInPane |
	typeInView := PluggableTextMorph
		on: self
		text: nil
		accept: #findPackage:notifying:
		readSelection: nil
		menu: nil.
	typeInView acceptOnCR: true;
		vResizing: #spaceFill;
		hResizing: #spaceFill;
		setTextMorphToSelectAllOnMouseEnter;
		askBeforeDiscardingEdits: false;
		setProperty: #alwaysAccept toValue: true.
	"selector was renamed at some point; support both"
	(typeInView respondsTo: #hideScrollBarsIndefinitely)
		ifTrue: [typeInView hideScrollBarsIndefinitely]
		ifFalse: [typeInView hideScrollBarIndefinitely].
	searchButton := SimpleButtonMorph new
		target: typeInView;
		color: Color white;
		label: 'Search';
		actionSelector: #accept;
		arguments: #();
		yourself.
	typeInPane := AlignmentMorph newRow.
	typeInPane vResizing: #shrinkWrap;
		hResizing: #shrinkWrap;
		listDirection: #leftToRight;
		addMorphFront: searchButton;
		addTransparentSpacerOfSize: 6 @ 0;
		addMorphBack: typeInView;
		setBalloonText: 'Type into the pane, then press Search (or hit RETURN) to visit the next package matching what you typed.'.
	^ typeInPane! !

!SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:24'!
buttonSpecs
	"Button label, action selector and balloon help for the button bar."
	^ #(('Install' installPackageRelease 'Install the latest version from the server.')
	('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.')
	('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.')
	('Update' loadUpdates 'Update the package index from the servers.')
	('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).')
	('Help' help 'What is this?'))! !
!SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:11' prior: 27936393! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoader methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01' prior: 27933585! categoryWrapperList "Create the wrapper list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (model categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list collect: [:cat | SMCategoryWrapper with: cat model: self]! ! !SMLoader methodsFor: 'filter utilities' stamp: 'gk 7/10/2004 15:45' prior: 27913226! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 17:30' prior: 27930584! createWindow | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.3. horizDivide := 0.6. self addMorph: (self buildButtonBar borderWidth: 0) frame: (0.0 @ 0.0 corner: 1.0 @ buttonBarHeight). self addMorph: (self buildSearchPane borderWidth: 0) frame: (0.0 @ buttonBarHeight corner: vertDivide @ searchHeight). 
self addMorph: (self buildMorphicPackagesList borderWidth: 0) frame: (0.0 @ (buttonBarHeight + searchHeight) corner: vertDivide @ horizDivide). self addMorph: (self buildMorphicCategoriesList borderWidth: 0) frame: (0.0 @ horizDivide corner: vertDivide @ 1.0). self addMorph: (self buildPackagePane borderWidth: 0) frame: (vertDivide @ buttonBarHeight corner: 1.0 @ 1.0). self on: #mouseEnter send: #paneTransition: to: self. self on: #mouseLeave send: #paneTransition: to: self! ! !SMLoader methodsFor: 'interface' stamp: 'gk 7/12/2004 11:14' prior: 27931214! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoader methodsFor: 'interface' stamp: 'btr 12/1/2006 02:01'! defaultLabel ^'SqueakMap Package Loader'! ! !SMLoader methodsFor: 'actions' stamp: 'btr 11/22/2006 01:14' prior: 27917579! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoader methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 00:14' prior: 27923782! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #( #('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') #('Installed packages' #filterInstalled 'Display only packages that are installed.') From noreply at buildbot.pypy.org Sun May 18 00:28:42 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Sun, 18 May 2014 00:28:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Do correct forking after STMForkExecption Message-ID: <20140517222842.55D8D1C3306@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r834:f0e2abc44f4f Date: 2014-05-18 00:27 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f0e2abc44f4f/ Log: Do correct forking after STMForkExecption diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -258,7 +258,7 @@ s_context.push(nlr.value) except STMForkException as fork_exception: print "Fork requested" - #self.fork_interpreter_thread(fork_exception.w_frame, fork_exception.w_stm_process) + self.fork_interpreter_thread(fork_exception.w_frame, fork_exception.w_stm_process) def _get_adapted_tick_counter(self): # Normally, the tick counter is decremented by 1 for every message send. 
diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1470,8 +1470,8 @@ print "STM_FORK primitive called" - #wrapper.StmProcessWrapper(interp.space, w_rcvr).fork(s_frame.w_self()) - #rstm.should_break_transaction() + wrapper.StmProcessWrapper(interp.space, w_rcvr).fork(s_frame.w_self()) + rstm.should_break_transaction() # ___________________________________________________________________________ # BlockClosure Primitives From noreply at buildbot.pypy.org Sun May 18 10:51:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 May 2014 10:51:49 +0200 (CEST) Subject: [pypy-commit] pypy default: This test fails with gcc 4.9. Message-ID: <20140518085149.243861C3382@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71562:120248369f62 Date: 2014-05-18 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/120248369f62/ Log: This test fails with gcc 4.9. diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -12,6 +12,7 @@ _store_digit, _mask_digit, InvalidEndiannessError, InvalidSignednessError) from rpython.rlib.rfloat import NAN from rpython.rtyper.test.test_llinterp import interpret +from rpython.translator.c.test.test_standalone import StandaloneTests class TestRLong(object): @@ -849,3 +850,17 @@ py.test.raises(InvalidSignednessError, i.tobytes, 3, 'little', signed=False) py.test.raises(OverflowError, i.tobytes, 2, 'little', signed=True) + +class TestTranslated(StandaloneTests): + + def test_gcc_4_9(self): + MIN = -sys.maxint-1 + + def entry_point(argv): + print rbigint.fromint(MIN+1)._digits + print rbigint.fromint(MIN)._digits + return 0 + + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data == '[%d]\n[0, 1]\n' % sys.maxint From noreply at buildbot.pypy.org Sun May 18 10:51:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 
18 May 2014 10:51:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Pfffff. This is enough to convince gcc 4.9 to produce correct code: do Message-ID: <20140518085150.6B44E1C3382@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71563:3cf384e86ef7 Date: 2014-05-18 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/3cf384e86ef7/ Log: Pfffff. This is enough to convince gcc 4.9 to produce correct code: do the "-" on the r_uint, not on the signed integer (because that might overflow). diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -177,7 +177,7 @@ if intval < 0: sign = -1 - ival = r_uint(-intval) + ival = -r_uint(intval) elif intval > 0: sign = 1 ival = r_uint(intval) From noreply at buildbot.pypy.org Sun May 18 14:50:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 May 2014 14:50:41 +0200 (CEST) Subject: [pypy-commit] pypy default: issue 1762: accept null bytes in the .py file named in the command-line, Message-ID: <20140518125041.698551D2D34@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71564:ec637e30bcd6 Date: 2014-05-18 14:50 +0200 http://bitbucket.org/pypy/pypy/changeset/ec637e30bcd6/ Log: issue 1762: accept null bytes in the .py file named in the command- line, in addition to files that are imported. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -590,6 +590,11 @@ # handle the case where no command/filename/module is specified # on the command-line. + try: + from _ast import PyCF_ACCEPT_NULL_BYTES + except ImportError: + PyCF_ACCEPT_NULL_BYTES = 0 + # update sys.path *after* loading site.py, in case there is a # "site.py" file in the script's directory. 
Only run this if we're # executing the interactive prompt, if we're running a script we @@ -613,7 +618,8 @@ def run_it(): co_python_startup = compile(startup, python_startup, - 'exec') + 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_python_startup in mainmodule.__dict__ mainmodule.__file__ = python_startup run_toplevel(run_it) @@ -626,7 +632,8 @@ else: # If not interactive, just read and execute stdin normally. def run_it(): - co_stdin = compile(sys.stdin.read(), '', 'exec') + co_stdin = compile(sys.stdin.read(), '', 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_stdin in mainmodule.__dict__ mainmodule.__file__ = '' success = run_toplevel(run_it) diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -22,3 +22,4 @@ PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 PyCF_ONLY_AST = 0x0400 +PyCF_ACCEPT_NULL_BYTES = 0x10000000 # PyPy only, for compile() diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -24,7 +24,8 @@ """ ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | - consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): + consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 | + consts.PyCF_ACCEPT_NULL_BYTES): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) @@ -53,9 +54,10 @@ else: source = space.readbuf_w(w_source).as_str() - if '\x00' in source: - raise OperationError(space.w_TypeError, space.wrap( - "compile() expected string without null bytes")) + if not (flags & consts.PyCF_ACCEPT_NULL_BYTES): + if '\x00' in source: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: code = ec.compiler.compile_to_ast(source, filename, 
mode, flags) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -610,6 +610,16 @@ firstlineno = co.co_firstlineno assert firstlineno == 2 + def test_compile_null_bytes(self): + import _ast + raises(TypeError, compile, '\x00', 'mymod', 'exec', 0) + raises(SyntaxError, compile, '\x00', 'mymod', 'exec', + _ast.PyCF_ACCEPT_NULL_BYTES) + src = "#abc\x00def\n" + raises(TypeError, compile, src, 'mymod', 'exec') + raises(TypeError, compile, src, 'mymod', 'exec', 0) + compile(src, 'mymod', 'exec', _ast.PyCF_ACCEPT_NULL_BYTES) # works + def test_print_function(self): import __builtin__ import sys diff --git a/pypy/module/_ast/__init__.py b/pypy/module/_ast/__init__.py --- a/pypy/module/_ast/__init__.py +++ b/pypy/module/_ast/__init__.py @@ -6,6 +6,8 @@ interpleveldefs = { "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST, + "PyCF_ACCEPT_NULL_BYTES": + "space.wrap(%s)" % consts.PyCF_ACCEPT_NULL_BYTES, "__version__" : "space.wrap('82160')", # from CPython's svn. } appleveldefs = {} From noreply at buildbot.pypy.org Sun May 18 15:25:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 May 2014 15:25:22 +0200 (CEST) Subject: [pypy-commit] pypy default: issue 1752: the peek() method must not create a string slice. This leads Message-ID: <20140518132522.9C9C61C0231@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71565:3777204fff8e Date: 2014-05-18 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/3777204fff8e/ Log: issue 1752: the peek() method must not create a string slice. This leads to bogus complexity as soon as the buffer size is more than some small number. 
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -458,9 +458,7 @@ return result def peek(self): - pos = self.pos - assert pos >= 0 - return self.buffer[pos:] + return (self.pos, self.buffer) def try_to_find_file_descriptor(self): return self.stream.try_to_find_file_descriptor() diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -234,11 +234,12 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.peek() - pn = peeked.find("\n") + startindex, peeked = self.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) if pn < 0: pn = len(peeked) - c = self.read(pn + 1) + c = self.read(pn - startindex + 1) if not c: break result.append(c) @@ -265,7 +266,7 @@ pass def peek(self): - return '' + return (0, '') def try_to_find_file_descriptor(self): return -1 @@ -705,9 +706,7 @@ return "".join(chunks) def peek(self): - pos = self.pos - assert pos >= 0 - return self.buf[pos:] + return (self.pos, self.buf) write = PassThrough("write", flush_buffers=True) truncate = PassThrough("truncate", flush_buffers=True) @@ -970,12 +969,13 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.base.peek() - pn = peeked.find("\n") - pr = peeked.find("\r") + startindex, peeked = self.base.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) + pr = peeked.find("\r", startindex) if pn < 0: pn = len(peeked) if pr < 0: pr = len(peeked) - c = self.read(min(pn, pr) + 1) + c = self.read(min(pn, pr) - startindex + 1) if not c: break result.append(c) @@ -1028,7 +1028,7 @@ self.buf = "" def peek(self): - return self.buf + return (0, self.buf) write = PassThrough("write", 
flush_buffers=True) truncate = PassThrough("truncate", flush_buffers=True) From noreply at buildbot.pypy.org Sun May 18 18:24:18 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 18 May 2014 18:24:18 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: deal with can_only_throw in HLOperation Message-ID: <20140518162418.2DD7D1D2D4E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71566:74ed98b37ed2 Date: 2014-05-18 07:58 +0100 http://bitbucket.org/pypy/pypy/changeset/74ed98b37ed2/ Log: deal with can_only_throw in HLOperation diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -468,20 +468,7 @@ # occour for this specific, typed operation. if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.dispatch == 2: - arg1 = self.binding(op.args[0]) - arg2 = self.binding(op.args[1]) - binop = getattr(pair(arg1, arg2), op.opname, None) - can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.dispatch == 1: - arg1 = self.binding(op.args[0]) - opname = op.opname - if opname == 'contains': opname = 'op_contains' - unop = getattr(arg1, opname, None) - can_only_throw = annmodel.read_can_only_throw(unop, arg1) - else: - can_only_throw = None - + can_only_throw = op.get_can_only_throw(self) if can_only_throw is not None: candidates = can_only_throw candidate_exits = exits diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -13,7 +13,8 @@ from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc -from rpython.annotator.model import SomeTuple, AnnotatorError +from rpython.annotator.model import ( + SomeTuple, AnnotatorError, read_can_only_throw) from rpython.flowspace.specialcase import SPECIAL_CASES @@ 
-101,6 +102,9 @@ spec = type(self).get_specialization(*args_s) return spec(*args) + def get_can_only_throw(self, annotator): + return None + class PureOperation(HLOperation): pure = True @@ -155,12 +159,22 @@ pass raise AnnotatorError("Unknown operation") + def get_can_only_throw(self, annotator): + args_s = [annotator.binding(v) for v in self.args] + spec = type(self).get_specialization(*args_s) + return read_can_only_throw(spec, args_s[0]) + @classmethod def get_specialization(cls, s_arg, *_ignored): try: impl = getattr(s_arg, cls.opname) + def specialized(arg, *other_args): return impl(*[x.ann for x in other_args]) + try: + specialized.can_only_throw = impl.can_only_throw + except AttributeError: + pass return specialized except AttributeError: return cls._dispatch(type(s_arg)) @@ -172,10 +186,20 @@ @classmethod def get_specialization(cls, s_arg1, s_arg2, *_ignored): impl = getattr(pair(s_arg1, s_arg2), cls.opname) + def specialized(arg1, arg2, *other_args): return impl(*[x.ann for x in other_args]) + try: + specialized.can_only_throw = impl.can_only_throw + except AttributeError: + pass return specialized + def get_can_only_throw(self, annotator): + args_s = [annotator.binding(v) for v in self.args] + spec = type(self).get_specialization(*args_s) + return read_can_only_throw(spec, args_s[0], args_s[1]) + def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) From noreply at buildbot.pypy.org Sun May 18 18:24:19 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 18 May 2014 18:24:19 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotators for op.contains Message-ID: <20140518162419.79C661D2D4E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71567:41935c3af33b Date: 2014-05-18 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/41935c3af33b/ Log: register annotators for op.contains diff --git a/rpython/annotator/unaryop.py 
b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -37,12 +37,16 @@ r.set_knowntypedata(knowntypedata) return r + at op.contains.register(SomeObject) +def contains_SomeObject(obj, element): + return s_Bool +contains_SomeObject.can_only_throw = [] + class __extend__(SomeObject): def issubtype(self, s_cls): if hasattr(self, 'is_type_of'): instances = self.is_type_of - annotator = getbookkeeper().annotator return builtin.builtin_isinstance(instances[0].ann, s_cls, [x.value for x in instances]) if self.is_constant() and s_cls.is_constant(): @@ -134,10 +138,6 @@ def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") - def op_contains(self, s_element): - return s_Bool - op_contains.can_only_throw = [] - def hint(self, *args_s): return self @@ -241,6 +241,12 @@ items = self.items[s_start.const:s_stop.const] return SomeTuple(items) + at op.contains.register(SomeList) +def contains_SomeList(obj, element): + obj.ann.listdef.generalize(element.ann) + return s_Bool +contains_SomeList.can_only_throw = [] + class __extend__(SomeList): @@ -288,11 +294,6 @@ def getanyitem(self): return self.listdef.read_item() - def op_contains(self, s_element): - self.listdef.generalize(s_element) - return s_Bool - op_contains.can_only_throw = [] - def hint(self, *args_s): hints = args_s[-1].const if 'maxlength' in hints: @@ -331,6 +332,21 @@ getattr(s_stop, 'const', 0) != -1: raise AnnotatorError("%s: not proven to have non-negative stop" % error) +def _can_only_throw(dct, *ignore): + if dct.ann.dictdef.dictkey.custom_eq_hash: + return None # r_dict: can throw anything + return [] # else: no possible exception + + + at op.contains.register(SomeDict) +def contains_SomeDict(dct, element): + dct.ann.dictdef.generalize_key(element.ann) + if dct.ann._is_empty(): + s_bool = SomeBool() + s_bool.const = False + return s_bool + return s_Bool +contains_SomeDict.can_only_throw = _can_only_throw class 
__extend__(SomeDict): @@ -410,19 +426,19 @@ self.dictdef.generalize_value(s_dfl) return self.dictdef.read_value() - def _can_only_throw(self, *ignore): - if self.dictdef.dictkey.custom_eq_hash: - return None # r_dict: can throw anything - return [] # else: no possible exception - - def op_contains(self, s_element): - self.dictdef.generalize_key(s_element) - if self._is_empty(): - s_bool = SomeBool() - s_bool.const = False - return s_bool - return s_Bool - op_contains.can_only_throw = _can_only_throw + at op.contains.register(SomeString) + at op.contains.register(SomeUnicodeString) +def contains_String(string, char): + if char.ann.is_constant() and char.ann.const == "\0": + r = SomeBool() + knowntypedata = {} + add_knowntypedata(knowntypedata, False, [string.value], + string.ann.nonnulify()) + r.set_knowntypedata(knowntypedata) + return r + else: + return contains_SomeObject(string, char) +contains_String.can_only_throw = [] class __extend__(SomeString, @@ -497,19 +513,6 @@ result = self.basestringclass(no_nul=self.no_nul) return result - def op_contains(self, s_element): - if s_element.is_constant() and s_element.const == "\0": - r = SomeBool() - bk = getbookkeeper() - op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - knowntypedata = {} - add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) - r.set_knowntypedata(knowntypedata) - return r - else: - return SomeObject.op_contains(self, s_element) - op_contains.can_only_throw = [] - def method_format(self, *args): raise AnnotatorError("Method format() is not RPython") diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -59,6 +59,7 @@ def register(cls, Some_cls): def decorator(func): cls._registry[Some_cls] = func + return func return decorator @@ -416,18 +417,15 @@ add_operator('newslice', 3) add_operator('hint', None, dispatch=1) -class Contains(PureOperation): +class 
Contains(SingleDispatchMixin, PureOperation): opname = 'contains' arity = 2 pyfunc = staticmethod(operator.contains) - # XXX "contains" clash with SomeObject method + # XXX "contains" clashes with SomeObject method @classmethod def get_specialization(cls, s_seq, s_elem): - impl = s_seq.op_contains - def specialized(seq, elem): - return impl(elem.ann) - return specialized + return cls._dispatch(type(s_seq)) class NewDict(HLOperation): From noreply at buildbot.pypy.org Sun May 18 18:29:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 18 May 2014 18:29:15 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Pfffff. This is enough to convince gcc 4.9 to produce correct code: do Message-ID: <20140518162915.85F771D2D4F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71568:837f6ad4dbdd Date: 2014-05-18 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/837f6ad4dbdd/ Log: Pfffff. This is enough to convince gcc 4.9 to produce correct code: do the "-" on the r_uint, not on the signed integer (because that might overflow). 
(grafted from 3cf384e86ef7f0b1d78d54a37853f94a9bf74aaf) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -177,7 +177,7 @@ if intval < 0: sign = -1 - ival = r_uint(-intval) + ival = -r_uint(intval) elif intval > 0: sign = 1 ival = r_uint(intval) From noreply at buildbot.pypy.org Sun May 18 22:33:54 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 18 May 2014 22:33:54 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: create pairmro() Message-ID: <20140518203354.C5AB71C01E8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71569:1a302091ddcf Date: 2014-05-18 20:12 +0100 http://bitbucket.org/pypy/pypy/changeset/1a302091ddcf/ Log: create pairmro() diff --git a/rpython/tool/pairtype.py b/rpython/tool/pairtype.py --- a/rpython/tool/pairtype.py +++ b/rpython/tool/pairtype.py @@ -61,3 +61,13 @@ bases = tuple(bases1 + bases2) or (tuple,) # 'tuple': ultimate base pair = pairtypecache[cls1, cls2] = extendabletype(name, bases, {}) return pair + +def pairmro(cls1, cls2): + """ + Return the resolution order on pairs of types for double dispatch. + + This order is compatible with the mro of pairtype(cls1, cls2). 
+ """ + for base2 in cls2.__mro__: + for base1 in cls1.__mro__: + yield (base1, base2) diff --git a/rpython/tool/test/test_pairtype.py b/rpython/tool/test/test_pairtype.py --- a/rpython/tool/test/test_pairtype.py +++ b/rpython/tool/test/test_pairtype.py @@ -1,7 +1,6 @@ +from rpython.tool.pairtype import pairtype, pair, extendabletype, pairmro -from rpython.tool.pairtype import pairtype, pair, extendabletype - -def test_binop(): +def test_binop(): ### Binary operation example class __extend__(pairtype(int, int)): def add((x, y)): @@ -13,16 +12,16 @@ def add((x, y)): return 'bool: %s+%s' % (x, y) - assert pair(3,4).add() == 'integer: 3+4' - assert pair(3,4).sub() == 'integer: 3-4' - assert pair(3,True).add() == 'integer: 3+True' - assert pair(3,True).sub() == 'integer: 3-True' - assert pair(False,4).add() == 'integer: False+4' - assert pair(False,4).sub() == 'integer: False-4' - assert pair(False,True).add() == 'bool: False+True' - assert pair(False,True).sub() == 'integer: False-True' + assert pair(3, 4).add() == 'integer: 3+4' + assert pair(3, 4).sub() == 'integer: 3-4' + assert pair(3, True).add() == 'integer: 3+True' + assert pair(3, True).sub() == 'integer: 3-True' + assert pair(False, 4).add() == 'integer: False+4' + assert pair(False, 4).sub() == 'integer: False-4' + assert pair(False, True).add() == 'bool: False+True' + assert pair(False, True).sub() == 'integer: False-True' -def test_somebuiltin(): +def test_somebuiltin(): ### Operation on built-in types class MiniPickler: def __init__(self): @@ -48,7 +47,7 @@ pair(p, [1, 2, ['hello', 3]]).write() assert p.data == ['I1', 'I2', 'Shello', 'I3', 'L2', 'L3'] -def test_some_multimethod(): +def test_some_multimethod(): ### Another multimethod example class Block: def __init__(self, exit): @@ -57,7 +56,7 @@ pass class Switch: pass - + class C_Generator: def __init__(self): self.lines = [] @@ -78,7 +77,7 @@ g = C_Generator() pair(g, Block(Switch())).emit(['v1', 'v2']) - assert g.lines == ["C code for block", "switch 
(v5) { ... }"] + assert g.lines == ["C code for block", "switch (v5) { ... }"] class Lisp_Generator: def __init__(self): @@ -95,16 +94,22 @@ def test_multiple_extend(): class A: __metaclass__ = extendabletype + class B: __metaclass__ = extendabletype - class __extend__(A,B): - + class __extend__(A, B): def f(self): pass assert hasattr(A, 'f') assert hasattr(B, 'f') - - +def test_pairmro(): + class A(object): pass + class A2(A): pass + class A3(A2): pass + class B(object): pass + class B2(B): pass + parent_pairtypes = pairtype(A3, B2).__mro__[:-2] + assert (tuple(pairtype(a, b) for a, b in pairmro(A3, B2)) == parent_pairtypes) From noreply at buildbot.pypy.org Sun May 18 22:33:56 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 18 May 2014 22:33:56 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: create DoubleDispatchRegistry Message-ID: <20140518203356.1DAE71C01E8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71570:9a6b10d71995 Date: 2014-05-18 21:33 +0100 http://bitbucket.org/pypy/pypy/changeset/9a6b10d71995/ Log: create DoubleDispatchRegistry diff --git a/rpython/tool/pairtype.py b/rpython/tool/pairtype.py --- a/rpython/tool/pairtype.py +++ b/rpython/tool/pairtype.py @@ -71,3 +71,26 @@ for base2 in cls2.__mro__: for base1 in cls1.__mro__: yield (base1, base2) + +class DoubleDispatchRegistry(object): + """ + A mapping of pairs of types to arbitrary objects respecting inheritance + """ + def __init__(self): + self._registry = {} + self._cache = {} + + def __getitem__(self, clspair): + try: + return self._cache[clspair] + except KeyError: + cls1, cls2 = clspair + for c1, c2 in pairmro(cls1, cls2): + if (c1, c2) in self._cache: + return self._cache[(c1, c2)] + else: + raise + + def __setitem__(self, clspair, value): + self._registry[clspair] = value + self._cache = self._registry.copy() diff --git a/rpython/tool/test/test_pairtype.py b/rpython/tool/test/test_pairtype.py --- a/rpython/tool/test/test_pairtype.py +++ 
b/rpython/tool/test/test_pairtype.py @@ -1,4 +1,5 @@ -from rpython.tool.pairtype import pairtype, pair, extendabletype, pairmro +from rpython.tool.pairtype import ( + pairtype, pair, extendabletype, pairmro, DoubleDispatchRegistry) def test_binop(): ### Binary operation example @@ -113,3 +114,18 @@ class B2(B): pass parent_pairtypes = pairtype(A3, B2).__mro__[:-2] assert (tuple(pairtype(a, b) for a, b in pairmro(A3, B2)) == parent_pairtypes) + +def test_doubledispatch(): + class A(object): pass + class A2(A): pass + class A3(A2): pass + class B(object): pass + class B2(B): pass + reg = DoubleDispatchRegistry() + reg[object, object] = "default" + assert reg[A3, B2] == "default" + reg[A2, B2] = "A2-B2" + assert reg[A, B2] == "default" + assert reg[A3, B2] == "A2-B2" + reg[A3, B] = "A3-B" + assert reg[A3, B2] == "A2-B2" # note that A2,B2 wins over A3,B From noreply at buildbot.pypy.org Mon May 19 01:12:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 May 2014 01:12:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix. Message-ID: <20140518231223.12AC71C01E8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71572:b90dbe86a285 Date: 2014-05-19 01:11 +0200 http://bitbucket.org/pypy/pypy/changeset/b90dbe86a285/ Log: Fix. 
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -637,16 +637,14 @@ self.readonly = readonly def getlength(self): - return self.array.len + return self.array.len * self.itemsize def getitem(self, index): - resbuf = ['\x00'] * self.itemsize array = self.array data = array._charbuf_start() - for i in xrange(self.itemsize): - resbuf[i] = data[index + i] + char = data[index] array._charbuf_stop() - return ''.join(resbuf) + return char def setitem(self, index, char): array = self.array diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -73,11 +73,11 @@ def as_str(self): buf = self.buf - n_bytes = buf.getlength() * buf.itemsize + n_bytes = buf.getlength() return buf.getslice(0, n_bytes, 1, n_bytes) def getlength(self): - return self.buf.getlength() + return self.buf.getlength() // self.buf.itemsize def getslice(self, start, stop): if start < 0: @@ -109,7 +109,10 @@ if step not in (0, 1): raise OperationError(space.w_NotImplementedError, space.wrap("")) if step == 0: # index only - return space.wrapbytes(self.buf.getitem(start)) + a = start * self.buf.itemsize + b = a + self.buf.itemsize + return space.wrapbytes( + ''.join([self.buf.getitem(i) for i in range(a, b)])) res = self.getslice(start, stop) return space.wrap(res) From noreply at buildbot.pypy.org Mon May 19 02:30:49 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 02:30:49 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: implement double dispatch in consider_op() Message-ID: <20140519003049.8A63D1C0231@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71573:7c1072611e34 Date: 2014-05-19 00:32 +0100 http://bitbucket.org/pypy/pypy/changeset/7c1072611e34/ Log: implement double dispatch in consider_op() diff --git 
a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -7,7 +7,7 @@ import operator import sys import types -from rpython.tool.pairtype import pair +from rpython.tool.pairtype import pair, DoubleDispatchRegistry from rpython.rlib.unroll import unrolling_iterable, _unroller from rpython.tool.sourcetools import compile2 from rpython.flowspace.model import (Constant, WrapException, const, Variable, @@ -54,13 +54,10 @@ type.__init__(cls, name, bases, attrdict) if hasattr(cls, 'opname'): setattr(op, cls.opname, cls) - cls._registry = {} - - def register(cls, Some_cls): - def decorator(func): - cls._registry[Some_cls] = func - return func - return decorator + if cls.dispatch == 1: + cls._registry = {} + elif cls.dispatch == 2: + cls._registry = DoubleDispatchRegistry() class HLOperation(SpaceOperation): @@ -152,6 +149,13 @@ dispatch = 1 @classmethod + def register(cls, Some_cls): + def decorator(func): + cls._registry[Some_cls] = func + return func + return decorator + + @classmethod def _dispatch(cls, Some_cls): for c in Some_cls.__mro__: try: @@ -185,16 +189,26 @@ dispatch = 2 @classmethod + def register(cls, Some1, Some2): + def decorator(func): + cls._registry[Some1, Some2] = func + return func + return decorator + + @classmethod def get_specialization(cls, s_arg1, s_arg2, *_ignored): - impl = getattr(pair(s_arg1, s_arg2), cls.opname) + try: + impl = getattr(pair(s_arg1, s_arg2), cls.opname) - def specialized(arg1, arg2, *other_args): - return impl(*[x.ann for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw + def specialized(arg1, arg2, *other_args): + return impl(*[x.ann for x in other_args]) + try: + specialized.can_only_throw = impl.can_only_throw + except AttributeError: + pass + return specialized except AttributeError: - pass - return specialized + return cls._registry[type(s_arg1), type(s_arg2)] def get_can_only_throw(self, annotator): args_s = 
[annotator.binding(v) for v in self.args] From noreply at buildbot.pypy.org Mon May 19 02:30:50 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 02:30:50 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register specialised annotators for op.is_ Message-ID: <20140519003050.DCD9F1C0231@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71574:7b1cd4d2bbb9 Date: 2014-05-19 01:11 +0100 http://bitbucket.org/pypy/pypy/changeset/7b1cd4d2bbb9/ Log: register specialised annotators for op.is_ diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -24,6 +24,40 @@ if oper.dispatch == 2]) + at op.is_.register(SomeObject, SomeObject) +def is__default(obj1, obj2): + r = SomeBool() + s_obj1 = obj1.ann + s_obj2 = obj2.ann + if s_obj2.is_constant(): + if s_obj1.is_constant(): + r.const = s_obj1.const is s_obj2.const + if s_obj2.const is None and not s_obj1.can_be_none(): + r.const = False + elif s_obj1.is_constant(): + if s_obj1.const is None and not s_obj2.can_be_none(): + r.const = False + knowntypedata = {} + + def bind(src_obj, tgt_obj): + if hasattr(tgt_obj.ann, 'is_type_of') and src_obj.ann.is_constant(): + add_knowntypedata( + knowntypedata, True, + [inst.value for inst in tgt_obj.ann.is_type_of], + getbookkeeper().valueoftype(src_obj.ann.const)) + add_knowntypedata(knowntypedata, True, [tgt_obj.value], src_obj.ann) + s_nonnone = tgt_obj.ann + if (src_obj.ann.is_constant() and src_obj.ann.const is None and + tgt_obj.ann.can_be_none()): + s_nonnone = tgt_obj.ann.nonnoneify() + add_knowntypedata(knowntypedata, + False, [tgt_obj.value], s_nonnone) + + bind(obj2, obj1) + bind(obj1, obj2) + r.set_knowntypedata(knowntypedata) + return r + class __extend__(pairtype(SomeObject, SomeObject)): def union((obj1, obj2)): @@ -94,47 +128,6 @@ else: return SomeInteger() - def is_((obj1, obj2)): - r = SomeBool() - if obj2.is_constant(): - if 
obj1.is_constant(): - r.const = obj1.const is obj2.const - if obj2.const is None and not obj1.can_be_none(): - r.const = False - elif obj1.is_constant(): - if obj1.const is None and not obj2.can_be_none(): - r.const = False - # XXX HACK HACK HACK - # XXX HACK HACK HACK - # XXX HACK HACK HACK - bk = getbookkeeper() - if bk is not None: # for testing - op = bk._find_current_op("is_", 2) - knowntypedata = {} - annotator = bk.annotator - - def bind(src_obj, tgt_obj, tgt_arg): - if hasattr(tgt_obj, 'is_type_of') and src_obj.is_constant(): - add_knowntypedata( - knowntypedata, True, - [inst.value for inst in tgt_obj.is_type_of], - bk.valueoftype(src_obj.const)) - - assert annotator.binding(op.args[tgt_arg]) == tgt_obj - add_knowntypedata(knowntypedata, True, [op.args[tgt_arg]], src_obj) - - nonnone_obj = tgt_obj - if src_obj.is_constant() and src_obj.const is None and tgt_obj.can_be_none(): - nonnone_obj = tgt_obj.nonnoneify() - - add_knowntypedata(knowntypedata, False, [op.args[tgt_arg]], nonnone_obj) - - bind(obj2, obj1, 0) - bind(obj1, obj2, 1) - r.set_knowntypedata(knowntypedata) - - return r - def divmod((obj1, obj2)): return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) @@ -742,25 +735,24 @@ s_self = unionof(bltn1.s_self, bltn2.s_self) return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) + at op.is_.register(SomePBC, SomePBC) +def is__PBC_PBC(pbc1, pbc2): + s = is__default(pbc1, pbc2) + if not s.is_constant(): + if not pbc1.ann.can_be_None or not pbc2.ann.can_be_None: + for desc in pbc1.ann.descriptions: + if desc in pbc2.ann.descriptions: + break + else: + s.const = False # no common desc in the two sets + return s + class __extend__(pairtype(SomePBC, SomePBC)): - def union((pbc1, pbc2)): d = pbc1.descriptions.copy() d.update(pbc2.descriptions) return SomePBC(d, can_be_None = pbc1.can_be_None or pbc2.can_be_None) - def is_((pbc1, pbc2)): - thistype = pairtype(SomePBC, SomePBC) - s = super(thistype, pair(pbc1, pbc2)).is_() - if not 
s.is_constant(): - if not pbc1.can_be_None or not pbc2.can_be_None: - for desc in pbc1.descriptions: - if desc in pbc2.descriptions: - break - else: - s.const = False # no common desc in the two sets - return s - class __extend__(pairtype(SomeImpossibleValue, SomeObject)): def union((imp1, obj2)): return obj2 diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -20,25 +20,6 @@ class C(object): pass -class DummyClassDef: - def __init__(self, cls=C): - self.cls = cls - self.name = cls.__name__ - -si0 = SomeInstance(DummyClassDef(), True) -si1 = SomeInstance(DummyClassDef()) -sTrue = SomeBool() -sTrue.const = True -sFalse = SomeBool() -sFalse.const = False - -def test_is_None(): - assert pair(s_None, s_None).is_() == sTrue - assert pair(si1, s_None).is_() == sFalse - assert pair(si0, s_None).is_() != sTrue - assert pair(si0, s_None).is_() != sFalse - assert pair(si0, s_None).is_() == SomeBool() - def test_equality(): assert s1 != s2 != s3 != s4 != s5 != s6 assert s1 == SomeType() From noreply at buildbot.pypy.org Mon May 19 02:30:52 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 02:30:52 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: fix Message-ID: <20140519003052.377AD1C0231@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71575:e1a1039ebe80 Date: 2014-05-19 01:20 +0100 http://bitbucket.org/pypy/pypy/changeset/e1a1039ebe80/ Log: fix diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -18,6 +18,7 @@ UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) +UNARY_OPERATIONS.remove('contains') @op.type.register(SomeObject) def type_SomeObject(arg): From noreply at buildbot.pypy.org Mon May 19 02:30:53 2014 From: noreply at buildbot.pypy.org (rlamy) Date: 
Mon, 19 May 2014 02:30:53 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: fix contains_SomeDict.can_only_throw Message-ID: <20140519003053.5FF251C0231@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71576:4757c82f9805 Date: 2014-05-19 01:30 +0100 http://bitbucket.org/pypy/pypy/changeset/4757c82f9805/ Log: fix contains_SomeDict.can_only_throw diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -333,12 +333,12 @@ getattr(s_stop, 'const', 0) != -1: raise AnnotatorError("%s: not proven to have non-negative stop" % error) -def _can_only_throw(dct, *ignore): - if dct.ann.dictdef.dictkey.custom_eq_hash: + +def _can_only_throw(s_dct, *ignore): + if s_dct.dictdef.dictkey.custom_eq_hash: return None # r_dict: can throw anything return [] # else: no possible exception - @op.contains.register(SomeDict) def contains_SomeDict(dct, element): dct.ann.dictdef.generalize_key(element.ann) From noreply at buildbot.pypy.org Mon May 19 02:34:36 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 May 2014 02:34:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Use new 'except ... as ...:' syntax to reduce diff with py3k. Message-ID: <20140519003436.2C91B1C0231@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r71577:7cb84eed6c64 Date: 2014-05-19 01:34 +0200 http://bitbucket.org/pypy/pypy/changeset/7cb84eed6c64/ Log: Use new 'except ... as ...:' syntax to reduce diff with py3k. 
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -85,7 +85,7 @@ if softspace: stdout.write('\n') - except SystemExit, e: + except SystemExit as e: handle_sys_exit(e) except: display_exception() @@ -611,7 +611,7 @@ f = open(python_startup) startup = f.read() f.close() - except IOError, e: + except IOError as e: print >> sys.stderr, "Could not open PYTHONSTARTUP" print >> sys.stderr, "IOError:", e else: @@ -667,7 +667,7 @@ args = (execfile, filename, mainmodule.__dict__) success = run_toplevel(*args) - except SystemExit, e: + except SystemExit as e: status = e.code if inspect_requested(): display_exception() @@ -683,7 +683,7 @@ readenv and os.getenv('PYPY_IRC_TOPIC')) success = run_toplevel(interactive_console, mainmodule, quiet=not irc_topic) - except SystemExit, e: + except SystemExit as e: status = e.code else: status = not success @@ -733,10 +733,10 @@ setup_bootstrap_path(executable) try: cmdline = parse_command_line(argv) - except CommandLineError, e: + except CommandLineError as e: print_error(str(e)) return 2 - except SystemExit, e: + except SystemExit as e: return e.code or 0 setup_and_fix_paths(**cmdline) return run_command_line(**cmdline) From noreply at buildbot.pypy.org Mon May 19 02:34:37 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 May 2014 02:34:37 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20140519003437.AF4811C0231@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r71578:f481a6c8308a Date: 2014-05-19 02:00 +0200 http://bitbucket.org/pypy/pypy/changeset/f481a6c8308a/ Log: hg merge default diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = 
['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -1,70 +1,77 @@ -====================== -Transactional Memory -====================== + +============================= +Software Transactional Memory +============================= .. contents:: This page is about ``pypy-stm``, a special in-development version of PyPy which can run multiple independent CPU-hungry threads in the same -process in parallel. It is side-stepping what is known in the Python -world as the "global interpreter lock (GIL)" problem. +process in parallel. It is a solution to what is known in the Python +world as the "global interpreter lock (GIL)" problem --- it is an +implementation of Python without the GIL. -"STM" stands for Software Transactional Memory, the technique used +"STM" stands for Software `Transactional Memory`_, the technique used internally. This page describes ``pypy-stm`` from the perspective of a user, describes work in progress, and finally gives references to more implementation details. -This work was done mostly by Remi Meier and Armin Rigo. Thanks to all -donors for crowd-funding the work so far! Please have a look at the -`2nd call for donation`_. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the `2nd call +for donation`_. +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory .. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% slower than a +``pypy-stm`` is a variant of the regular PyPy interpreter. 
With caveats_ +listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). -What you get in exchange for this slow-down is that ``pypy-stm`` runs -any multithreaded Python program on multiple CPUs at once. Programs -running two threads or more in parallel should ideally run faster than -in a regular PyPy, either now or soon as issues are fixed. In one way, -that's all there is to it: this is a GIL-less Python, feel free to -`download and try it`__. However, the deeper idea behind the -``pypy-stm`` project is to improve what is so far the state-of-the-art -for using multiple CPUs, which for cases where separate processes don't -work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is pushing forward an approach to *hide* the threads, as -described below in `atomic sections`_. +The benefit is that the resulting ``pypy-stm`` can execute multiple +threads of Python code in parallel. Programs running two threads or +more in parallel should ideally run faster than in a regular PyPy +(either now, or soon as bugs are fixed). +* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use + it as a drop-in replacement and multithreaded programs will run on + multiple cores. -.. __: +* ``pypy-stm`` does not impose any special API to the user, but it + provides a new pure Python module called `transactional_memory`_ with + features to inspect the state or debug conflicts_ that prevent + parallelization. This module can also be imported on top of a non-STM + PyPy or CPython. -Current status -============== +* Building on top of the way the GIL is removed, we will talk + about `Atomic sections, Transactions, etc.: a better way to write + parallel programs`_. 
+ + +Getting Started +=============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu 12.04 binary -here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, -but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel, -in other words it is running a thread pool up to 4 threads emulating normal -threads. +interested in trying it out, you can download a Ubuntu binary here__ +(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are +release mode, but not stripped of debug symbols). The current version +supports four "segments", which means that it will run up to four +threads in parallel. To build a version from sources, you first need to compile a custom -version of clang; we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +version of clang(!); we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 `` for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for the very extensive usage that pypy-stm does of a -clang-only feature (without them, you get crashes of clang). Then get +they are fixes for a clang-only feature that hasn't been used so heavily +in the past (without the patches, you get crashes of clang). Then get the branch `stmgc-c7`_ of PyPy and run:: rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py @@ -75,23 +82,26 @@ .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ -Caveats: +.. _caveats: -* So far, small examples work fine, but there are still a number of - bugs. We're busy fixing them. +Current status +-------------- + +* So far, small examples work fine, but there are still a few bugs. 
+ We're busy fixing them as we find them; feel free to `report bugs`_. * Currently limited to 1.5 GB of RAM (this is just a parameter in - `core.h`__). Memory overflows are not detected correctly, so may - cause segmentation faults. + `core.h`__). Memory overflows are not correctly handled; they cause + segfaults. -* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, - which is "only" bad). Moreover, you should run it with a command like - ``pypy-stm --jit trace_limit=60000 args...``; the default value of - 6000 for ``trace_limit`` is currently too low (6000 should become - reasonable again as we improve). Also, in order to produce machine - code, the JIT needs to enter a special single-threaded mode for now. - This all means that you *will* get very bad performance results if - your program doesn't run for *many* seconds for now. +* The JIT warm-up time improved recently but is still bad. In order to + produce machine code, the JIT needs to enter a special single-threaded + mode for now. This means that you will get bad performance results if + your program doesn't run for several seconds, where *several* can mean + *many.* When trying benchmarks, be sure to check that you have + reached the warmed state, i.e. the performance is not improving any + more. This should be clear from the fact that as long as it's + producing more machine code, ``pypy-stm`` will run on a single core. * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large @@ -108,111 +118,197 @@ * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in JIT-generated machine code). But the overall bookkeeping logic could - see more improvements (see Statistics_ below). - -* You can use `atomic sections`_, but the most visible missing thing is - that you don't get reports about the "conflicts" you get. 
This would - be the first thing that you need in order to start using atomic - sections more extensively. Also, for now: for better results, try to - explicitly force a transaction break just before (and possibly after) - each large atomic section, with ``time.sleep(0)``. + see more improvements (see `Low-level statistics`_ below). * Forking the process is slow because the complete memory needs to be - copied manually right now. + copied manually. A warning is printed to this effect. -* Very long-running processes should eventually crash on an assertion - error because of a non-implemented overflow of an internal 29-bit - number, but this requires at the very least ten hours --- more - probably, several days or more. +* Very long-running processes (on the order of days) will eventually + crash on an assertion error because of a non-implemented overflow of + an internal 29-bit number. .. _`report bugs`: https://bugs.pypy.org/ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h -Statistics +User Guide ========== + -When a non-main thread finishes, you get statistics printed to stderr, -looking like that:: +Drop-in replacement +------------------- - thread 0x7f73377fe600: - outside transaction 42182 0.506 s - run current 85466 0.000 s - run committed 34262 3.178 s - run aborted write write 6982 0.083 s - run aborted write read 550 0.005 s - run aborted inevitable 388 0.010 s - run aborted other 0 0.000 s - wait free segment 0 0.000 s - wait write read 78 0.027 s - wait inevitable 887 0.490 s - wait other 0 0.000 s - bookkeeping 51418 0.606 s - minor gc 162970 1.135 s - major gc 1 0.019 s - sync pause 59173 1.738 s - spin loop 129512 0.094 s +Multithreaded, CPU-intensive Python programs should work unchanged on +``pypy-stm``. They will run using multiple CPU cores in parallel. 
-The first number is a counter; the second number gives the associated -time (the amount of real time that the thread was in this state; the sum -of all the times should be equal to the total time between the thread's -start and the thread's end). The most important points are "run -committed", which gives the amount of useful work, and "outside -transaction", which should give the time spent e.g. in library calls -(right now it seems to be a bit larger than that; to investigate). -Everything else is overhead of various forms. (Short-, medium- and -long-term future work involves reducing this overhead :-) +The existing semantics of the GIL (Global Interpreter Lock) are +unchanged: although running on multiple cores in parallel, ``pypy-stm`` +gives the illusion that threads are run serially, with switches only +occurring between bytecodes, not in the middle of them. Programs can +rely on this: using ``shared_list.append()/pop()`` or +``shared_dict.setdefault()`` as synchronization mecanisms continues to +work as expected. -These statistics are not printed out for the main thread, for now. +This works by internally considering the points where a standard PyPy or +CPython would release the GIL, and replacing them with the boundaries of +"transaction". Like their database equivalent, multiple transactions +can execute in parallel, but will commit in some serial order. They +appear to behave as if they were completely run in this serialization +order. Atomic sections -=============== +--------------- -While one of the goal of pypy-stm is to give a GIL-free but otherwise -unmodified Python, the other goal is to push for a better way to use -multithreading. For this, you (as the Python programmer) get an API -in the ``__pypy__.thread`` submodule: +PyPy supports *atomic sections,* which are blocks of code which you want +to execute without "releasing the GIL". 
*This is experimental and may +be removed in the future.* In STM terms, this means blocks of code that +are executed while guaranteeing that the transaction is not interrupted +in the middle. -* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in - a ``with __pypy__.thread.atomic:`` statement). It runs the whole - block of code without breaking the current transaction --- from - the point of view of a regular CPython/PyPy, this is equivalent to - saying that the GIL will not be released at all between the start and - the end of this block of code. +Here is a usage example:: -The obvious usage is to use atomic blocks in the same way as one would -use locks: to protect changes to some shared data, you do them in a -``with atomic`` block, just like you would otherwise do them in a ``with -mylock`` block after ``mylock = thread.allocate_lock()``. This allows -you not to care about acquiring the correct locks in the correct order; -it is equivalent to having only one global lock. This is how -transactional memory is `generally described`__: as a way to efficiently -execute such atomic blocks, running them in parallel while giving the -illusion that they run in some serial order. + with __pypy__.thread.atomic: + assert len(lst1) == 10 + x = lst1.pop(0) + lst1.append(x) -.. __: http://en.wikipedia.org/wiki/Transactional_memory +In this (bad) example, we are sure that the item popped off one end of +the list is appened again at the other end atomically. It means that +another thread can run ``len(lst1)`` or ``x in lst1`` without any +particular synchronization, and always see the same results, +respectively ``10`` and ``True``. It will never see the intermediate +state where ``lst1`` only contains 9 elements. Atomic sections are +similar to re-entrant locks (they can be nested), but additionally they +protect against the concurrent execution of *any* code instead of just +code that happens to be protected by the same lock in other threads. 
-However, the less obvious intended usage of atomic sections is as a -wide-ranging replacement of explicit threads. You can turn a program -that is not multi-threaded at all into a program that uses threads -internally, together with large atomic sections to keep the behavior -unchanged. This capability can be hidden in a library or in the -framework you use; the end user's code does not need to be explicitly -aware of using threads. For a simple example of this, see -`transaction.py`_ in ``lib_pypy``. The idea is that if you have a -program where the function ``f(key, value)`` runs on every item of some -big dictionary, you can replace the loop with:: +Note that the notion of atomic sections is very strong. If you write +code like this:: + + with __pypy__.thread.atomic: + time.sleep(10) + +then, if you think about it as if we had a GIL, you are executing a +10-seconds-long atomic transaction without releasing the GIL at all. +This prevents all other threads from progressing at all. While it is +not strictly true in ``pypy-stm``, the exact rules for when other +threads can progress or not are rather complicated; you have to consider +it likely that such a piece of code will eventually block all other +threads anyway. + +Note that if you want to experiment with ``atomic``, you may have to add +manually a transaction break just before the atomic block. This is +because the boundaries of the block are not guaranteed to be the +boundaries of the transaction: the latter is at least as big as the +block, but maybe bigger. Therefore, if you run a big atomic block, it +is a good idea to break the transaction just before. This can be done +e.g. by the hack of calling ``time.sleep(0)``. (This may be fixed at +some point.) + +There are also issues with the interaction of locks and atomic blocks. +This can be seen if you write to files (which have locks), including +with a ``print`` to standard output. 
If one thread tries to acquire a +lock while running in an atomic block, and another thread has got the +same lock, then the former may fail with a ``thread.error``. The reason +is that "waiting" for some condition to become true --while running in +an atomic block-- does not really make sense. For now you can work +around it by making sure that, say, all your prints are either in an +``atomic`` block or none of them are. (This kind of issue is +theoretically hard to solve.) + + +Locks +----- + +**Not Implemented Yet** + +The thread module's locks have their basic semantic unchanged. However, +using them (e.g. in ``with my_lock:`` blocks) starts an alternative +running mode, called `Software lock elision`_. This means that PyPy +will try to make sure that the transaction extends until the point where +the lock is released, and if it succeeds, then the acquiring and +releasing of the lock will be "elided". This means that in this case, +the whole transaction will technically not cause any write into the lock +object --- it was unacquired before, and is still unacquired after the +transaction. + +This is specially useful if two threads run ``with my_lock:`` blocks +with the same lock. If they each run a transaction that is long enough +to contain the whole block, then all writes into the lock will be elided +and the two transactions will not conflict with each other. As usual, +they will be serialized in some order: one of the two will appear to run +before the other. Simply, each of them executes an "acquire" followed +by a "release" in the same transaction. As explained above, the lock +state goes from "unacquired" to "unacquired" and can thus be left +unchanged. + +This approach can gracefully fail: unlike atomic sections, there is no +guarantee that the transaction runs until the end of the block. If you +perform any input/output while you hold the lock, the transaction will +end as usual just before the input/output operation. 
If this occurs, +then the lock elision mode is cancelled and the lock's "acquired" state +is really written. + +Even if the lock is really acquired already, a transaction doesn't have +to wait for it to become free again. It can enter the elision-mode anyway +and tentatively execute the content of the block. It is only at the end, +when trying to commit, that the thread will pause. As soon as the real +value stored in the lock is switched back to "unacquired", it can then +proceed and attempt to commit its already-executed transaction (which +can fail and abort and restart from the scratch, as usual). + +Note that this is all *not implemented yet,* but we expect it to work +even if you acquire and release several locks. The elision-mode +transaction will extend until the first lock you acquired is released, +or until the code performs an input/output or a wait operation (for +example, waiting for another lock that is currently not free). In the +common case of acquiring several locks in nested order, they will all be +elided by the same transaction. + +.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410 + + +Atomic sections, Transactions, etc.: a better way to write parallel programs +---------------------------------------------------------------------------- + +(This section is based on locks as we plan to implement them, but also +works with the existing atomic sections.) + +In the cases where elision works, the block of code can run in parallel +with other blocks of code *even if they are protected by the same lock.* +You still get the illusion that the blocks are run sequentially. This +works even for multiple threads that run each a series of such blocks +and nothing else, protected by one single global lock. 
This is +basically the Python application-level equivalent of what was done with +the interpreter in ``pypy-stm``: while you think you are writing +thread-unfriendly code because of this global lock, actually the +underlying system is able to make it run on multiple cores anyway. + +This capability can be hidden in a library or in the framework you use; +the end user's code does not need to be explicitly aware of using +threads. For a simple example of this, there is `transaction.py`_ in +``lib_pypy``. The idea is that you write, or already have, some program +where the function ``f(key, value)`` runs on every item of some big +dictionary, say:: + + for key, value in bigdict.items(): + f(key, value) + +Then you simply replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) transaction.run() This code runs the various calls to ``f(key, value)`` using a thread -pool, but every single call is done in an atomic section. The end -result is that the behavior should be exactly equivalent: you don't get -any extra multithreading issue. +pool, but every single call is executed under the protection of a unique +lock. The end result is that the behavior is exactly equivalent --- in +fact it makes little sense to do it in this way on a non-STM PyPy or on +CPython. But on ``pypy-stm``, the various locked calls to ``f(key, +value)`` can tentatively be executed in parallel, even if the observable +result is as if they were executed in some serial order. This approach hides the notion of threads from the end programmer, including all the hard multithreading-related issues. This is not the @@ -223,41 +319,176 @@ only requires that the end programmer identifies where this parallelism is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. - + .. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. 
_OpenMP: http://en.wikipedia.org/wiki/OpenMP -================== -Other APIs in pypy-stm: +.. _`transactional_memory`: -* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" - in this pypy-stm. This is the limit above which more threads will not - be able to execute on more cores. (Right now it is limited to 4 due - to inter-segment overhead, but should be increased in the future. It +API of transactional_memory +--------------------------- + +The new pure Python module ``transactional_memory`` runs on both CPython +and PyPy, both with and without STM. It contains: + +* ``getsegmentlimit()``: return the number of "segments" in + this pypy-stm. This is the limit above which more threads will not be + able to execute on more cores. (Right now it is limited to 4 due to + inter-segment overhead, but should be increased in the future. It should also be settable, and the default value should depend on the - number of actual CPUs.) + number of actual CPUs.) If STM is not available, this returns 1. -* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but - raises an exception if you attempt to nest it inside another - ``atomic``. +* ``print_abort_info(minimum_time=0.0)``: debugging help. Each thread + remembers the longest abort or pause it did because of cross-thread + contention_. This function prints it to ``stderr`` if the time lost + is greater than ``minimum_time`` seconds. The record is then + cleared, to make it ready for new events. This function returns + ``True`` if it printed a report, and ``False`` otherwise. -* ``__pypy__.thread.signals_enabled``: a context manager that runs - its block with signals enabled. By default, signals are only - enabled in the main thread; a non-main thread will not receive - signals (this is like CPython). Enabling signals in non-main threads - is useful for libraries where threads are hidden and the end user is - not expecting his code to run elsewhere than in the main thread. 
-Note that all of this API is (or will be) implemented in a regular PyPy -too: for example, ``with atomic`` will simply mean "don't release the -GIL" and ``getsegmentlimit()`` will return 1. +API of __pypy__.thread +---------------------- -================== +The ``__pypy__.thread`` submodule is a built-in module of PyPy that +contains a few internal built-in functions used by the +``transactional_memory`` module, plus the following: + +* ``__pypy__.thread.atomic``: a context manager to run a block in + fully atomic mode, without "releasing the GIL". (May be eventually + removed?) + +* ``__pypy__.thread.signals_enabled``: a context manager that runs its + block with signals enabled. By default, signals are only enabled in + the main thread; a non-main thread will not receive signals (this is + like CPython). Enabling signals in non-main threads is useful for + libraries where threads are hidden and the end user is not expecting + his code to run elsewhere than in the main thread. + + +.. _contention: + +Conflicts +--------- + +Based on Software Transactional Memory, the ``pypy-stm`` solution is +prone to "conflicts". To repeat the basic idea, threads execute their code +speculatively, and at known points (e.g. between bytecodes) they +coordinate with each other to agree on which order their respective +actions should be "committed", i.e. become globally visible. Each +duration of time between two commit-points is called a transaction. + +A conflict occurs when there is no consistent ordering. The classical +example is if two threads both tried to change the value of the same +global variable. In that case, only one of them can be allowed to +proceed, and the other one must be either paused or aborted (restarting +the transaction). If this occurs too often, parallelization fails. + +How much actual parallelization a multithreaded program can see is a bit +subtle. 
Basically, a program not using ``__pypy__.thread.atomic`` or +eliding locks, or doing so for very short amounts of time, will +parallelize almost freely (as long as it's not some artificial example +where, say, all threads try to increase the same global counter and do +nothing else). + +However, if the program requires longer transactions, it comes +with less obvious rules. The exact details may vary from version to +version, too, until they are a bit more stabilized. Here is an +overview. + +Parallelization works as long as two principles are respected. The +first one is that the transactions must not *conflict* with each other. +The most obvious sources of conflicts are threads that all increment a +global shared counter, or that all store the result of their +computations into the same list --- or, more subtly, that all ``pop()`` +the work to do from the same list, because that is also a mutation of +the list. (It is expected that some STM-aware library will eventually +be designed to help with conflict problems, like a STM-aware queue.) + +A conflict occurs as follows: when a transaction commits (i.e. finishes +successfully) it may cause other transactions that are still in progress +to abort and retry. This is a waste of CPU time, but even in the worst +case scenario it is not worse than a GIL, because at least one +transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and +1 CPU doing a job that commits successfully). + +Conflicts do occur, of course, and it is pointless to try to avoid them +all. For example they can be abundant during some warm-up phase. What +is important is to keep them rare enough in total. + +Another issue is that of avoiding long-running so-called "inevitable" +transactions ("inevitable" is taken in the sense of "which cannot be +avoided", i.e. transactions which cannot abort any more). Transactions +like that should only occur if you use ``__pypy__.thread.atomic``, +generally because of I/O in atomic blocks.
They work, but the +transaction is turned inevitable before the I/O is performed. For all +the remaining execution time of the atomic block, they will impede +parallel work. The best is to organize the code so that such operations +are done completely outside ``__pypy__.thread.atomic``. + +(This is related to the fact that blocking I/O operations are +discouraged with Twisted, and if you really need them, you should do +them on their own separate thread.) + +In case of lock elision, we don't get long-running inevitable +transactions, but a different problem can occur: doing I/O cancels lock +elision, and the lock turns into a real lock, preventing other threads +from committing if they also need this lock. (More about it when lock +elision is implemented and tested.) + + + +Implementation +============== + +XXX this section mostly empty for now + + +Low-level statistics +-------------------- + +When a non-main thread finishes, you get low-level statistics printed to +stderr, looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + sync commit soon 1 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + longest recordered marker 0.000826 s + "File "x.py", line 5, in f" + +On each line, the first number is a counter, and the second number gives +the associated time --- the amount of real time that the thread was in +this state. The sum of all the times should be equal to the total time +between the thread's start and the thread's end. 
The most important +points are "run committed", which gives the amount of useful work, and +"outside transaction", which should give the time spent e.g. in library +calls (right now it seems to be larger than that; to investigate). The +various "run aborted" and "wait" entries are time lost due to +conflicts_. Everything else is overhead of various forms. (Short-, +medium- and long-term future work involves reducing this overhead :-) + +The last two lines are special; they are an internal marker read by +``transactional_memory.print_abort_info()``. + +These statistics are not printed out for the main thread, for now. Reference to implementation details -=================================== +----------------------------------- The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more @@ -282,3 +513,15 @@ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py + + + +See also +======== + +See also +https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst +(section about STM). + + +.. include:: _ref.txt diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. branch: tkinter_osx_packaging +OS/X specific header path diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. 
startrev: b2cc67adbaad diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -257,6 +259,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + else: + set_winsz_opcode_pos = termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = 
fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -21,11 +21,6 @@ return space.fromcache(Cache).error - at unwrap_spec(format=str) -def calcsize(space, format): - return space.wrap(_calcsize(space, format)) - - def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -38,7 +33,11 @@ @unwrap_spec(format=str) -def pack(space, format, args_w): +def calcsize(space, format): + return space.wrap(_calcsize(space, format)) + + +def _pack(space, format, args_w): if jit.isconstant(format): size = _calcsize(space, format) else: @@ -50,13 +49,18 @@ raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: raise OperationError(get_error(space), space.wrap(e.msg)) - return space.wrapbytes(fmtiter.result.build()) + return fmtiter.result.build() + + + at unwrap_spec(format=str) +def pack(space, format, args_w): + return space.wrapbytes(_pack(space, format, args_w)) # XXX inefficient @unwrap_spec(format=str, offset=int) def pack_into(space, format, w_buffer, offset, args_w): - res = pack(space, format, args_w).bytes_w(space) + res = _pack(space, format, args_w) buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -158,7 +158,15 @@ # Good default if there are no replacements. 
buf = StringBuilder(len("bytearray(b'')") + len(s)) - buf.append("bytearray(b'") + buf.append("bytearray(b") + quote = "'" + for c in s: + if c == '"': + quote = "'" + break + elif c == "'": + quote = '"' + buf.append(quote) for i in range(len(s)): c = s[i] @@ -180,7 +188,8 @@ else: buf.append(c) - buf.append("')") + buf.append(quote) + buf.append(")") return space.wrap(buf.build()) diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -39,22 +39,20 @@ # ignore whitespace after bracket while i < slen and s[i] == ' ': i += 1 + while slen > 0 and s[slen-1] == ' ': + slen -= 1 # extract first number realstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: + if s[i] in ('+', '-') and pc not in ('e', 'E') and i != realstart: break pc = s[i] i += 1 realstop = i - # ignore whitespace - while i < slen and s[i] == ' ': - i += 1 - # return appropriate strings is only one number is there if i >= slen: newstop = realstop - 1 @@ -76,20 +74,17 @@ # find sign for imaginary part if s[i] == '-' or s[i] == '+': imagsign = s[i] - if imagsign == ' ': + else: raise ValueError - i+=1 - # whitespace - while i < slen and s[i] == ' ': - i += 1 + i += 1 if i >= slen: raise ValueError imagstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E'): + if s[i] in ('+', '-') and pc not in ('e', 'E'): break pc = s[i] i += 1 @@ -97,14 +92,12 @@ imagstop = i - 1 if imagstop < 0: raise ValueError - if s[imagstop] not in ('j','J'): + if s[imagstop] not in ('j', 'J'): raise ValueError if imagstop < imagstart: raise ValueError - while i 2147483647 @@ -91,7 +86,7 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ROOTFINDERS, + ["n/a", "shadowstack", "asmgcc"], "shadowstack", cmdline="--gcrootfinder", 
requires={ @@ -372,9 +367,10 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X + # disallow asmgcc on OS/X and on Win32 if config.translation.gcrootfinder == "asmgcc": - assert sys.platform != "darwin" + assert sys.platform != "darwin", "'asmgcc' not supported on OS/X" + assert sys.platform != "win32", "'asmgcc' not supported on Win32" # ---------------------------------------------------------------- diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -296,10 +296,11 @@ # trim: instructions with no framesize are removed from self.insns, # and from the 'previous_insns' lists - assert hasattr(self.insns[0], 'framesize') - old = self.insns[1:] - del self.insns[1:] - for insn in old: + if 0: # <- XXX disabled because it seems bogus, investigate more + assert hasattr(self.insns[0], 'framesize') + old = self.insns[1:] + del self.insns[1:] + for insn in old: if hasattr(insn, 'framesize'): self.insns.append(insn) insn.previous_insns = [previnsn for previnsn in insn.previous_insns diff --git a/rpython/translator/c/src/asm.c b/rpython/translator/c/src/asm.c --- a/rpython/translator/c/src/asm.c +++ b/rpython/translator/c/src/asm.c @@ -12,6 +12,6 @@ # include "src/asm_ppc.c" #endif -#if defined(MS_WINDOWS) && defined(_MSC_VER) +#if defined(_MSC_VER) # include "src/asm_msvc.c" #endif diff --git a/rpython/translator/c/src/asm_msvc.c b/rpython/translator/c/src/asm_msvc.c --- a/rpython/translator/c/src/asm_msvc.c +++ b/rpython/translator/c/src/asm_msvc.c @@ -1,5 +1,6 @@ #ifdef PYPY_X86_CHECK_SSE2 #include +#include void pypy_x86_check_sse2(void) { int features; From noreply at buildbot.pypy.org Mon May 19 02:34:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 May 2014 02:34:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hg merge default 
Message-ID: <20140519003440.0D5BD1C0231@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r71579:2032fc136bbc Date: 2014-05-19 02:21 +0200 http://bitbucket.org/pypy/pypy/changeset/2032fc136bbc/ Log: hg merge default diff too long, truncating to 2000 out of 4419 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,7 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,61 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin 
Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +146,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +168,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +192,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -205,6 +214,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +228,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -235,27 +246,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +282,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,29 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) + + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) def test_main(): run_unittest(TestGdbm) diff --git 
a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,174 @@ +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' +#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... +#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) + +class error(Exception): + pass + +def _fromstr(key): + if not isinstance(key, str): + raise TypeError("gdbm mappings have string indices only") + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 + if not res: + self._raise_from_errno() + self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(os.strerror(ffi.errno)) + raise error(lib.gdbm_strerror(lib.gdbm_errno)) + + def __len__(self): + if self.size < 0: + self.size = len(self.keys()) + return self.size + + def __setitem__(self, key, value): + self._check_closed() + self._size = -1 + r = 
lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res + + def keys(self): + self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(str(ffi.buffer(key.dptr, key.dsize))) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error("GDBM object has already been closed") + + __del__ = close + + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + +def open(filename, flags='r', mode=0666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= 
lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error("Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,10 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.1`_: the latest official release +* `Release 2.3.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. 
_`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -100,6 +100,8 @@ ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. + Note that using a : in fname is a bad idea, Windows + users, beware. ``:``\ *fname* Full logging, including ``debug_print``. @@ -113,6 +115,11 @@ generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. +``PYPY_IRC_TOPIC`` + If set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. + + .. include:: ../gc_info.rst :start-line: 7 diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -93,7 +93,7 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com -.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.0.html New Platforms and Features diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -30,7 +30,8 @@ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ listed below, it should be in theory within 20%-50% slower than a -regular PyPy, comparing the JIT version in both cases. It is called +regular PyPy, comparing the JIT version in both cases (but see below!). +It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -90,6 +91,11 @@ * So far, small examples work fine, but there are still a few bugs. We're busy fixing them as we find them; feel free to `report bugs`_. +* It runs with an overhead as low as 20% on examples like "richards". + There are also other examples with higher overheads --up to 10x for + "translate.py"-- which we are still trying to understand. 
One suspect + is our partial GC implementation, see below. + * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause segfaults. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: b2cc67adbaad +.. startrev: f556d32f8319 diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -37,6 +37,9 @@ PYTHONPATH : %r-separated list of directories prefixed to the default module search path. The result is sys.path. PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. +PYPY_IRC_TOPIC: if set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. +PYPYLOG: If set to a non-empty value, enable logging. """ try: @@ -678,7 +681,11 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - success = run_toplevel(interactive_console, mainmodule, quiet) + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' or ( + readenv and os.getenv('PYPY_IRC_TOPIC')) + success = run_toplevel(interactive_console, mainmodule, + quiet=quiet or not irc_topic) except SystemExit as e: status = e.code else: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,11 +7,8 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir -from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = PYPY_VERSION[3] == "final" - python3 = os.environ.get("PYTHON3", "python3") @@ -21,7 +18,6 @@ 
stdout=subprocess.PIPE) return p.stdout.read().rstrip() banner = get_banner() -print repr(banner) app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') app_main = os.path.abspath(app_main) @@ -255,10 +251,6 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt - if is_release: - assert irc_header not in child.before - else: - assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') @@ -278,6 +270,22 @@ child.sendline("'' in sys.path") child.expect("True") + def test_yes_irc_topic(self, monkeypatch): + monkeypatch.setenv('PYPY_IRC_TOPIC', '1') + child = self.spawn([]) + child.expect(irc_header) # banner + + def test_maybe_irc_topic(self): + import sys + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' + child = self.spawn([]) + child.expect('>>>') # banner + if irc_topic: + assert irc_header in child.before + else: + assert irc_header not in child.before + def test_help(self): # test that -h prints the usage, including the name of the executable # which should be /full/path/to/app_main.py in this case @@ -1048,6 +1056,7 @@ # ---------------------------------------- from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION cpy_ver = '%d' % CPYTHON_VERSION[0] + from lib_pypy._pypy_interact import irc_header goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and @@ -1067,6 +1076,7 @@ self.w_fake_exe = self.space.wrap(str(fake_exe)) self.w_expected_path = self.space.wrap(expected_path) self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final") self.w_tmp_dir = self.space.wrap(tmp_dir) @@ -1136,3 +1146,4 @@ # assert it did not crash finally: sys.path[:] = old_sys_path + diff --git a/pypy/module/cpyext/include/patchlevel.h 
b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "3.2.5" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0-alpha0" +#define PYPY_VERSION "2.4.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat, genericpath +import sys, os, stat from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -528,8 +528,7 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - # os.path.isdir on win32 is not rpython when pywin32 installed - if genericpath.isdir(filepart) and case_ok(filepart): + if os.path.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -45,7 +45,7 @@ The variable struct.error is an exception raised on errors.""" - applevel_name = '_struct' + applevel_name = "_struct" interpleveldefs = { 'error': 'interp_struct.get_error(space)', @@ -55,9 +55,9 @@ 'pack_into': 'interp_struct.pack_into', 'unpack': 'interp_struct.unpack', 'unpack_from': 'interp_struct.unpack_from', - '_clearcache': 'interp_struct.clearcache', 'Struct': 'interp_struct.W_Struct', + '_clearcache': 'interp_struct.clearcache', } appleveldefs = { diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -144,3 +144,6 @@ 
pack_into=interp2app(W_Struct.descr_pack_into), unpack_from=interp2app(W_Struct.descr_unpack_from), ) + +def clearcache(space): + """No-op on PyPy""" diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 4, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -74,6 +74,7 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + subprocess.check_call([str(pypy_c), '-c', 'import gdbm']) if not withouttk: try: subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -397,16 +397,15 @@ return repr(graph) + blk + opid def flowin(self, graph, block): - #print 'Flowing', block, [self.binding(a) for a in block.inputargs] try: - for i in range(len(block.operations)): + for i, op in enumerate(block.operations): + self.bookkeeper.enter((graph, block, i)) try: - self.bookkeeper.enter((graph, block, i)) - self.consider_op(block, i) + self.consider_op(op) finally: self.bookkeeper.leave() - except BlockedInference, e: + except BlockedInference as e: if (e.op is block.operations[-1] and block.exitswitch == c_last_exception): # this is the case where the last operation of the block will @@ -428,11 +427,16 @@ # other cases are problematic (but will hopefully be solved # later by reflowing). 
Throw the BlockedInference up to # processblock(). + e.opindex = i raise except annmodel.HarmlesslyBlocked: return + except annmodel.AnnotatorError as e: # note that UnionError is a subclass + e.source = gather_error(self, graph, block, i) + raise + else: # dead code removal: don't follow all exits if the exitswitch # is known @@ -443,11 +447,6 @@ exits = [link for link in exits if link.exitcase == s_exitswitch.const] - # mapping (exitcase, variable) -> s_annotation - # that can be attached to booleans, exitswitches - knowntypedata = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", {}) - # filter out those exceptions which cannot # occour for this specific, typed operation. if block.exitswitch == c_last_exception: @@ -480,93 +479,12 @@ exits.append(link) candidates = [c for c in candidates if c not in covered] + # mapping (exitcase, variable) -> s_annotation + # that can be attached to booleans, exitswitches + knowntypedata = getattr(self.bindings.get(block.exitswitch), + "knowntypedata", {}) for link in exits: - in_except_block = False - - last_exception_var = link.last_exception # may be None for non-exception link - last_exc_value_var = link.last_exc_value # may be None for non-exception link - - if isinstance(link.exitcase, (types.ClassType, type)) \ - and issubclass(link.exitcase, py.builtin.BaseException): - assert last_exception_var and last_exc_value_var - last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - last_exception_object.const = last_exception_var.value - last_exception_object.is_type_of = [last_exc_value_var] - - if isinstance(last_exception_var, Variable): - self.setbinding(last_exception_var, last_exception_object) - if isinstance(last_exc_value_var, Variable): - self.setbinding(last_exc_value_var, last_exc_value_object) - - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - 
last_exception_object.const = last_exception_var.value - #if link.exitcase is Exception: - # last_exc_value_object = annmodel.SomeObject() - #else: - last_exc_value_vars = [] - in_except_block = True - - ignore_link = False - cells = [] - renaming = {} - for a,v in zip(link.args,link.target.inputargs): - renaming.setdefault(a, []).append(v) - for a,v in zip(link.args,link.target.inputargs): - if a == last_exception_var: - assert in_except_block - cells.append(last_exception_object) - elif a == last_exc_value_var: - assert in_except_block - cells.append(last_exc_value_object) - last_exc_value_vars.append(v) - else: - cell = self.binding(a) - if (link.exitcase, a) in knowntypedata: - knownvarvalue = knowntypedata[(link.exitcase, a)] - cell = pair(cell, knownvarvalue).improve() - # ignore links that try to pass impossible values - if cell == annmodel.s_ImpossibleValue: - ignore_link = True - - if hasattr(cell,'is_type_of'): - renamed_is_type_of = [] - for v in cell.is_type_of: - new_vs = renaming.get(v,[]) - renamed_is_type_of += new_vs - assert cell.knowntype is type - newcell = annmodel.SomeType() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.is_type_of = renamed_is_type_of - - if hasattr(cell, 'knowntypedata'): - renamed_knowntypedata = {} - for (value, v), s in cell.knowntypedata.items(): - new_vs = renaming.get(v, []) - for new_v in new_vs: - renamed_knowntypedata[value, new_v] = s - assert isinstance(cell, annmodel.SomeBool) - newcell = annmodel.SomeBool() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.set_knowntypedata(renamed_knowntypedata) - - cells.append(cell) - - if ignore_link: - continue - - if in_except_block: - last_exception_object.is_type_of = last_exc_value_vars - - self.links_followed[link] = True - self.addpendingblock(graph, link.target, cells) - + self.follow_link(graph, link, knowntypedata) if block in self.notify: # reflow from certain positions when this block is done for callback 
in self.notify[block]: @@ -575,39 +493,114 @@ else: callback() + def follow_link(self, graph, link, knowntypedata): + in_except_block = False + last_exception_var = link.last_exception # may be None for non-exception link + last_exc_value_var = link.last_exc_value # may be None for non-exception link + + if isinstance(link.exitcase, (types.ClassType, type)) \ + and issubclass(link.exitcase, py.builtin.BaseException): + assert last_exception_var and last_exc_value_var + last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + last_exception_object.is_type_of = [last_exc_value_var] + + if isinstance(last_exception_var, Variable): + self.setbinding(last_exception_var, last_exception_object) + if isinstance(last_exc_value_var, Variable): + self.setbinding(last_exc_value_var, last_exc_value_object) + + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + #if link.exitcase is Exception: + # last_exc_value_object = annmodel.SomeObject() + #else: + last_exc_value_vars = [] + in_except_block = True + + ignore_link = False + cells = [] + renaming = {} + for a, v in zip(link.args, link.target.inputargs): + renaming.setdefault(a, []).append(v) + for a, v in zip(link.args, link.target.inputargs): + if a == last_exception_var: + assert in_except_block + cells.append(last_exception_object) + elif a == last_exc_value_var: + assert in_except_block + cells.append(last_exc_value_object) + last_exc_value_vars.append(v) + else: + cell = self.binding(a) + if (link.exitcase, a) in knowntypedata: + knownvarvalue = knowntypedata[(link.exitcase, a)] + cell = pair(cell, knownvarvalue).improve() + # ignore links that try to pass impossible values + if cell == annmodel.s_ImpossibleValue: + ignore_link = True + + if 
hasattr(cell,'is_type_of'): + renamed_is_type_of = [] + for v in cell.is_type_of: + new_vs = renaming.get(v,[]) + renamed_is_type_of += new_vs + assert cell.knowntype is type + newcell = annmodel.SomeType() + if cell.is_constant(): + newcell.const = cell.const + cell = newcell + cell.is_type_of = renamed_is_type_of + + if hasattr(cell, 'knowntypedata'): + renamed_knowntypedata = {} + for (value, v), s in cell.knowntypedata.items(): + new_vs = renaming.get(v, []) + for new_v in new_vs: + renamed_knowntypedata[value, new_v] = s + assert isinstance(cell, annmodel.SomeBool) + newcell = annmodel.SomeBool() + if cell.is_constant(): + newcell.const = cell.const + cell = newcell + cell.set_knowntypedata(renamed_knowntypedata) + + cells.append(cell) + if ignore_link: + return + + if in_except_block: + last_exception_object.is_type_of = last_exc_value_vars + self.links_followed[link] = True + self.addpendingblock(graph, link.target, cells) + #___ creating the annotations based on operations ______ - def consider_op(self, block, opindex): - op = block.operations[opindex] - try: - argcells = [self.binding(a) for a in op.args] + def consider_op(self, op): + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
- # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - resultcell = op.consider(self, *argcells) - except annmodel.AnnotatorError as e: # note that UnionError is a subclass - graph = self.bookkeeper.position_key[0] - e.source = gather_error(self, graph, block, opindex) - raise + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... + # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, -1) + resultcell = op.consider(self, *argcells) if resultcell is None: - resultcell = self.noreturnvalue(op) + resultcell = annmodel.s_ImpossibleValue elif resultcell == annmodel.s_ImpossibleValue: - raise BlockedInference(self, op, opindex) # the operation cannot succeed + raise BlockedInference(self, op, -1) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result - def noreturnvalue(self, op): - return annmodel.s_ImpossibleValue # no return value (hook method) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -14,16 +14,12 @@ SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from 
rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) @@ -838,5 +834,5 @@ else: basedef = s_wrf1.classdef.commonbase(s_wrf2.classdef) if basedef is None: # no common base class! complain... - return SomeObject() + raise UnionError(s_wrf1, s_wrf2) return SomeWeakRef(basedef) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -13,8 +13,6 @@ s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType) -from rpython.rtyper.llannotation import ( - SomeAddress, SomePtr, SomeLLADTMeth, lltype_to_annotation) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -23,10 +21,17 @@ from rpython.annotator.argument import ArgumentsForTranslation from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind -from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper import extregistry +BUILTIN_ANALYZERS = {} + +def analyzer_for(func): + def wrapped(ann_func): + BUILTIN_ANALYZERS[func] = ann_func + return func + return wrapped + class Bookkeeper(object): """The log of choices that have been made while analysing the operations. 
It ensures that the same 'choice objects' will be returned if we ask @@ -137,6 +142,7 @@ check_no_flags(clsdef) def consider_call_site(self, call_op): + from rpython.rtyper.llannotation import SomeLLADTMeth, lltype_to_annotation binding = self.annotator.binding s_callable = binding(call_op.args[0]) args_s = [binding(arg) for arg in call_op.args[1:]] @@ -297,10 +303,6 @@ elif extregistry.is_registered(x): entry = extregistry.lookup(x) result = entry.compute_annotation_bk(self) - elif isinstance(x, lltype._ptr): - result = SomePtr(lltype.typeOf(x)) - elif isinstance(x, llmemory.fakeaddress): - result = SomeAddress() elif tp is type: result = SomeConstantType(x, self) elif callable(x): @@ -600,6 +602,7 @@ return False else: return True + # get current bookkeeper def getbookkeeper(): @@ -610,7 +613,8 @@ except AttributeError: return None +def immutablevalue(x): + return getbookkeeper().immutablevalue(x) + def delayed_imports(): - # import ordering hack - global BUILTIN_ANALYZERS - from rpython.annotator.builtin import BUILTIN_ANALYZERS + import rpython.annotator.builtin diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -8,17 +8,14 @@ SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) -from rpython.rtyper.llannotation import ( - SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import ( + getbookkeeper, immutablevalue, BUILTIN_ANALYZERS, analyzer_for) from rpython.annotator import description from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel +from rpython.annotator.model import AnnotatorError -# convenience only! 
-def immutablevalue(x): - return getbookkeeper().immutablevalue(x) def constpropagate(func, args_s, s_result): """Returns s_result unless all args are constants, in which case the @@ -44,14 +41,6 @@ func, args, realresult, s_result)) return s_realresult -BUILTIN_ANALYZERS = {} - -def analyzer_for(func): - def wrapped(ann_func): - BUILTIN_ANALYZERS[func] = ann_func - return func - return wrapped - # ____________________________________________________________ def builtin_range(*args): @@ -223,7 +212,7 @@ def builtin_tuple(s_iterable): if isinstance(s_iterable, SomeTuple): return s_iterable - return SomeObject() + raise AnnotatorError("tuple(): argument must be another tuple") def builtin_list(s_iterable): if isinstance(s_iterable, SomeList): @@ -332,6 +321,7 @@ @analyzer_for(rpython.rlib.objectmodel.hlinvoke) def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s): + from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError @@ -354,25 +344,6 @@ def robjmodel_keepalive_until_here(*args_s): return immutablevalue(None) - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) -def llmemory_cast_ptr_to_adr(s): - from rpython.rtyper.llannotation import SomeInteriorPtr - assert not isinstance(s, SomeInteriorPtr) - return SomeAddress() - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr) -def llmemory_cast_adr_to_ptr(s, s_type): - assert s_type.is_constant() - return SomePtr(s_type.const) - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int) -def llmemory_cast_adr_to_int(s, s_mode=None): - return SomeInteger() # xxx - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr) -def llmemory_cast_int_to_adr(s): - return SomeAddress() - try: import unicodedata except ImportError: @@ -386,131 +357,6 @@ def analyze(): return SomeOrderedDict(getbookkeeper().getdictdef()) - - -# annotation of low-level types -from 
rpython.rtyper.llannotation import SomePtr -from rpython.rtyper.lltypesystem import lltype - - at analyzer_for(lltype.malloc) -def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, - s_add_memory_pressure=None): - assert (s_n is None or s_n.knowntype == int - or issubclass(s_n.knowntype, rpython.rlib.rarithmetic.base_int)) - assert s_T.is_constant() - if s_n is not None: - n = 1 - else: - n = None - if s_zero: - assert s_zero.is_constant() - if s_flavor is None: - p = lltype.malloc(s_T.const, n) - r = SomePtr(lltype.typeOf(p)) - else: - assert s_flavor.is_constant() - assert s_track_allocation is None or s_track_allocation.is_constant() - assert (s_add_memory_pressure is None or - s_add_memory_pressure.is_constant()) - # not sure how to call malloc() for the example 'p' in the - # presence of s_extraargs - r = SomePtr(lltype.Ptr(s_T.const)) - return r - - at analyzer_for(lltype.free) -def free(s_p, s_flavor, s_track_allocation=None): - assert s_flavor.is_constant() - assert s_track_allocation is None or s_track_allocation.is_constant() - # same problem as in malloc(): some flavors are not easy to - # malloc-by-example - #T = s_p.ll_ptrtype.TO - #p = lltype.malloc(T, flavor=s_flavor.const) - #lltype.free(p, flavor=s_flavor.const) - - at analyzer_for(lltype.render_immortal) -def render_immortal(s_p, s_track_allocation=None): - assert s_track_allocation is None or s_track_allocation.is_constant() - - at analyzer_for(lltype.typeOf) -def typeOf(s_val): - lltype = annotation_to_lltype(s_val, info="in typeOf(): ") - return immutablevalue(lltype) - - at analyzer_for(lltype.cast_primitive) -def cast_primitive(T, s_v): - assert T.is_constant() - return ll_to_annotation(lltype.cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) - - at analyzer_for(lltype.nullptr) -def nullptr(T): - assert T.is_constant() - p = lltype.nullptr(T.const) - return immutablevalue(p) - - at analyzer_for(lltype.cast_pointer) -def cast_pointer(PtrT, s_p): - assert 
isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p - assert PtrT.is_constant() - cast_p = lltype.cast_pointer(PtrT.const, s_p.ll_ptrtype._defl()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.cast_opaque_ptr) -def cast_opaque_ptr(PtrT, s_p): - assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p - assert PtrT.is_constant() - cast_p = lltype.cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_fieldptr) -def direct_fieldptr(s_p, s_fieldname): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - assert s_fieldname.is_constant() - cast_p = lltype.direct_fieldptr(s_p.ll_ptrtype._example(), - s_fieldname.const) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_arrayitems) -def direct_arrayitems(s_p): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - cast_p = lltype.direct_arrayitems(s_p.ll_ptrtype._example()) - return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) - - at analyzer_for(lltype.direct_ptradd) -def direct_ptradd(s_p, s_n): - assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p - # don't bother with an example here: the resulting pointer is the same - return s_p - - at analyzer_for(lltype.cast_ptr_to_int) -def cast_ptr_to_int(s_ptr): # xxx - return SomeInteger() - - at analyzer_for(lltype.cast_int_to_ptr) -def cast_int_to_ptr(PtrT, s_int): - assert PtrT.is_constant() - return SomePtr(ll_ptrtype=PtrT.const) - - at analyzer_for(lltype.identityhash) -def identityhash(s_obj): - assert isinstance(s_obj, SomePtr) - return SomeInteger() - - at analyzer_for(lltype.getRuntimeTypeInfo) -def getRuntimeTypeInfo(T): - assert T.is_constant() - return immutablevalue(lltype.getRuntimeTypeInfo(T.const)) - - at analyzer_for(lltype.runtime_type_info) -def runtime_type_info(s_p): - assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" 
% s_p - return SomePtr(lltype.typeOf(lltype.runtime_type_info(s_p.ll_ptrtype._example()))) - - at analyzer_for(lltype.Ptr) -def constPtr(T): - assert T.is_constant() - return immutablevalue(lltype.Ptr(T.const)) - - #________________________________ # weakrefs @@ -525,88 +371,9 @@ "a weakref to cannot be None") return SomeWeakRef(s_obj.classdef) - -from rpython.rtyper.lltypesystem import llmemory - - at analyzer_for(llmemory.weakref_create) -def llweakref_create(s_obj): - if (not isinstance(s_obj, SomePtr) or - s_obj.ll_ptrtype.TO._gckind != 'gc'): - raise Exception("bad type for argument to weakref_create(): %r" % ( - s_obj,)) - return SomePtr(llmemory.WeakRefPtr) - - at analyzer_for(llmemory.weakref_deref ) -def llweakref_deref(s_ptrtype, s_wref): - if not (s_ptrtype.is_constant() and - isinstance(s_ptrtype.const, lltype.Ptr) and - s_ptrtype.const.TO._gckind == 'gc'): - raise Exception("weakref_deref() arg 1 must be a constant " - "ptr type, got %s" % (s_ptrtype,)) - if not (isinstance(s_wref, SomePtr) and - s_wref.ll_ptrtype == llmemory.WeakRefPtr): - raise Exception("weakref_deref() arg 2 must be a WeakRefPtr, " - "got %s" % (s_wref,)) - return SomePtr(s_ptrtype.const) - - at analyzer_for(llmemory.cast_ptr_to_weakrefptr) -def llcast_ptr_to_weakrefptr(s_ptr): - assert isinstance(s_ptr, SomePtr) - return SomePtr(llmemory.WeakRefPtr) - - at analyzer_for(llmemory.cast_weakrefptr_to_ptr) -def llcast_weakrefptr_to_ptr(s_ptrtype, s_wref): - if not (s_ptrtype.is_constant() and - isinstance(s_ptrtype.const, lltype.Ptr)): - raise Exception("cast_weakrefptr_to_ptr() arg 1 must be a constant " - "ptr type, got %s" % (s_ptrtype,)) - if not (isinstance(s_wref, SomePtr) and - s_wref.ll_ptrtype == llmemory.WeakRefPtr): - raise Exception("cast_weakrefptr_to_ptr() arg 2 must be a WeakRefPtr, " - "got %s" % (s_wref,)) - return SomePtr(s_ptrtype.const) - #________________________________ # non-gc objects @analyzer_for(rpython.rlib.objectmodel.free_non_gc_object) def 
robjmodel_free_non_gc_object(obj): pass - - -#_________________________________ -# memory address - - at analyzer_for(llmemory.raw_malloc) -def raw_malloc(s_size): - assert isinstance(s_size, SomeInteger) #XXX add noneg...? - return SomeAddress() - - at analyzer_for(llmemory.raw_malloc_usage) -def raw_malloc_usage(s_size): - assert isinstance(s_size, SomeInteger) #XXX add noneg...? - return SomeInteger(nonneg=True) - - at analyzer_for(llmemory.raw_free) -def raw_free(s_addr): - assert isinstance(s_addr, SomeAddress) - - at analyzer_for(llmemory.raw_memclear) -def raw_memclear(s_addr, s_int): - assert isinstance(s_addr, SomeAddress) - assert isinstance(s_int, SomeInteger) - - at analyzer_for(llmemory.raw_memcopy) -def raw_memcopy(s_addr1, s_addr2, s_int): - assert isinstance(s_addr1, SomeAddress) - assert isinstance(s_addr2, SomeAddress) - assert isinstance(s_int, SomeInteger) #XXX add noneg...? - - -#_________________________________ -# offsetof/sizeof - - - at analyzer_for(llmemory.offsetof) -def offsetof(TYPE, fldname): - return SomeInteger() diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -6,9 +6,9 @@ SomeBool, SomeInteger, SomeString, SomeFloat, SomeList, SomeDict, s_None, SomeObject, SomeInstance, SomeTuple, unionof, SomeUnicodeString, SomeType, AnnotatorError) -from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.listdef import ListDef from rpython.annotator.dictdef import DictDef +from rpython.rtyper import extregistry _annotation_cache = {} @@ -40,7 +40,7 @@ def _compute_annotation(t, bookkeeper=None): from rpython.rtyper.lltypesystem import lltype - from rpython.rtyper import extregistry + from rpython.rtyper.llannotation import lltype_to_annotation if isinstance(t, SomeObject): return t elif isinstance(t, lltype.LowLevelType): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- 
a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -11,15 +11,11 @@ SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) -from rpython.annotator.bookkeeper import getbookkeeper +from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? from rpython.annotator.model import AnnotatorError -# convenience only! -def immutablevalue(x): - return getbookkeeper().immutablevalue(x) - UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) diff --git a/rpython/flowspace/generator.py b/rpython/flowspace/generator.py --- a/rpython/flowspace/generator.py +++ b/rpython/flowspace/generator.py @@ -1,6 +1,8 @@ """Flow graph building for generators""" from rpython.flowspace.argument import Signature +from rpython.flowspace.bytecode import HostCode +from rpython.flowspace.pygraph import PyGraph from rpython.flowspace.model import (Block, Link, Variable, Constant, checkgraph, const) from rpython.flowspace.operation import op @@ -13,10 +15,17 @@ _immutable_ = True _attrs_ = () -def bootstrap_generator(graph): +def make_generator_entry_graph(func): # This is the first copy of the graph. We replace it with # a small bootstrap graph. 
- GeneratorIterator = make_generatoriterator_class(graph) + code = HostCode._from_code(func.func_code) + graph = PyGraph(func, code) + block = graph.startblock + for name, w_value in zip(code.co_varnames, block.framestate.mergeable): + if isinstance(w_value, Variable): + w_value.rename(name) + varnames = get_variable_names(graph.startblock.inputargs) + GeneratorIterator = make_generatoriterator_class(varnames) replace_graph_with_bootstrap(GeneratorIterator, graph) # We attach a 'next' method to the GeneratorIterator class # that will invoke the real function, based on a second @@ -30,11 +39,11 @@ tweak_generator_body_graph(GeneratorIterator.Entry, graph) -def make_generatoriterator_class(graph): +def make_generatoriterator_class(var_names): class GeneratorIterator(object): class Entry(AbstractPosition): _immutable_ = True - varnames = get_variable_names(graph.startblock.inputargs) + varnames = var_names def __init__(self, entry): self.current = entry @@ -72,7 +81,7 @@ self.current = next_entry return return_value GeneratorIterator.next = next - return func # for debugging + graph._tweaked_func = func # for testing def get_variable_names(variables): seen = set() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -1,13 +1,13 @@ """Implements the main interface for flow graph creation: build_flow(). """ -from inspect import CO_NEWLOCALS +from inspect import CO_NEWLOCALS, isgeneratorfunction -from rpython.flowspace.model import Variable, checkgraph +from rpython.flowspace.model import checkgraph from rpython.flowspace.bytecode import HostCode from rpython.flowspace.flowcontext import (FlowContext, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, - bootstrap_generator) + make_generator_entry_graph) from rpython.flowspace.pygraph import PyGraph @@ -33,15 +33,10 @@ Create the flow graph for the function. 
""" _assert_rpythonic(func) + if (isgeneratorfunction(func) and + not hasattr(func, '_generator_next_method_of_')): + return make_generator_entry_graph(func) code = HostCode._from_code(func.func_code) - if (code.is_generator and - not hasattr(func, '_generator_next_method_of_')): - graph = PyGraph(func, code) - block = graph.startblock - for name, w_value in zip(code.co_varnames, block.framestate.mergeable): - if isinstance(w_value, Variable): - w_value.rename(name) - return bootstrap_generator(graph) graph = PyGraph(func, code) ctx = FlowContext(graph, code) ctx.build_flow() diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -65,6 +65,13 @@ # (on CPython they are '==', but not identical either) return ctx.appcall(os.unlink, *args_w) +if os.name == 'nt': + @register_flow_sc(os.path.isdir) + def sc_os_path_isdir(ctx, *args_w): + # Cpython win32 reroutes os.path.isdir to nt._isdir + # which is not rpython + import genericpath + return ctx.appcall(genericpath.isdir, *args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/flowspace/test/test_generator.py b/rpython/flowspace/test/test_generator.py --- a/rpython/flowspace/test/test_generator.py +++ b/rpython/flowspace/test/test_generator.py @@ -1,8 +1,8 @@ from rpython.conftest import option from rpython.flowspace.objspace import build_flow from rpython.flowspace.model import Variable -from rpython.flowspace.generator import (make_generatoriterator_class, - replace_graph_with_bootstrap, get_variable_names, attach_next_method) +from rpython.flowspace.generator import ( + make_generator_entry_graph, get_variable_names) from rpython.translator.simplify import join_blocks @@ -93,14 +93,11 @@ yield n + 1 z -= 10 # - graph = build_flow(f) - GeneratorIterator = 
make_generatoriterator_class(graph) - replace_graph_with_bootstrap(GeneratorIterator, graph) - func1 = attach_next_method(GeneratorIterator, graph) + graph = make_generator_entry_graph(f) + func1 = graph._tweaked_func if option.view: graph.show() - # - assert func1._generator_next_method_of_ is GeneratorIterator + GeneratorIterator = graph._tweaked_func._generator_next_method_of_ assert hasattr(GeneratorIterator, 'next') # graph_next = build_flow(GeneratorIterator.next.im_func) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1861,20 +1861,26 @@ #END MARKING elif self.gc_state == STATE_SWEEPING: # - # Walk all rawmalloced objects and free the ones that don't - # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. - limit = self.nursery_size // self.ac.page_size - remaining = self.free_unvisited_rawmalloc_objects_step(limit) - # - # Ask the ArenaCollection to visit a fraction of the objects. - # Free the ones that have not been visited above, and reset - # GCFLAG_VISITED on the others. Visit at most '3 * limit' - # pages minus the number of objects already visited above. - done = self.ac.mass_free_incremental(self._free_if_unvisited, - 2 * limit + remaining) + if self.raw_malloc_might_sweep.non_empty(): + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. + # This limit is conservatively high enough to guarantee that + # a total object size of at least '3 * nursery_size' bytes + # is processed. + limit = 3 * self.nursery_size // self.small_request_threshold + self.free_unvisited_rawmalloc_objects_step(limit) + done = False # the 2nd half below must still be done + else: + # Ask the ArenaCollection to visit a fraction of the objects. + # Free the ones that have not been visited above, and reset + # GCFLAG_VISITED on the others. Visit at most '3 * + # nursery_size' bytes. 
+ limit = 3 * self.nursery_size // self.ac.page_size + done = self.ac.mass_free_incremental(self._free_if_unvisited, + limit) # XXX tweak the limits above # - if remaining > 0 and done: + if done: self.num_major_collects += 1 # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -288,7 +288,9 @@ _about_ = make_sure_not_resized def compute_result_annotation(self, s_arg): - from rpython.annotator.model import SomeList + from rpython.annotator.model import SomeList, s_None + if s_None.contains(s_arg): + return s_arg # only None: just return assert isinstance(s_arg, SomeList) # the logic behind it is that we try not to propagate # make_sure_not_resized, when list comprehension is not on diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -177,7 +177,7 @@ if intval < 0: sign = -1 - ival = r_uint(-intval) + ival = -r_uint(intval) elif intval > 0: sign = 1 ival = r_uint(intval) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -125,15 +125,18 @@ errorhandler=None, allow_surrogates=False): if errorhandler is None: errorhandler = default_unicode_error_decode - return str_decode_utf_8_impl(s, size, errors, final, errorhandler, - allow_surrogates=allow_surrogates) + result = UnicodeBuilder(size) + pos = str_decode_utf_8_impl(s, size, errors, final, errorhandler, + allow_surrogates=allow_surrogates, + result=result) + return result.build(), pos + at specialize.argtype(6) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, - allow_surrogates): + allow_surrogates, result): if size == 0: - return u'', 0 + return 0 - result = UnicodeBuilder(size) pos = 0 while pos < size: ordch1 = ord(s[pos]) @@ -291,7 +294,7 @@ result.append(unichr(0xDC00 + (c & 0x03FF))) pos += 4 - return 
result.build(), pos + return pos def _encodeUCS4(result, ch): # Encode UCS4 Unicode ordinals diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -104,7 +104,7 @@ return _rweakvaldict.WeakValueDictRepr(rtyper, rtyper.getrepr(self.s_key)) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): @@ -117,7 +117,7 @@ class __extend__(pairtype(SomeWeakValueDict, SomeWeakValueDict)): def union((s_wvd1, s_wvd2)): if s_wvd1.valueclassdef is not s_wvd2.valueclassdef: - return annmodel.SomeObject() # not the same class! complain... + raise UnionError(s_wvd1, s_wvd2, "not the same class!") s_key = annmodel.unionof(s_wvd1.s_key, s_wvd2.s_key) return SomeWeakValueDict(s_key, s_wvd1.valueclassdef) @@ -164,7 +164,7 @@ from rpython.rlib import _rweakkeydict return _rweakkeydict.WeakKeyDictRepr(rtyper) - def rtyper_makekey_ex(self, rtyper): + def rtyper_makekey(self): return self.__class__, def method_get(self, s_key): @@ -182,9 +182,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - return SomeObject() # not the same key class! complain... + raise UnionError(w_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - return SomeObject() # not the same value class! complain... 
+ raise UnionError(w_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/test_debug.py b/rpython/rlib/test/test_debug.py --- a/rpython/rlib/test/test_debug.py +++ b/rpython/rlib/test/test_debug.py @@ -53,6 +53,15 @@ py.test.raises(ListChangeUnallowed, interpret, f, [], list_comprehension_operations=True) +def test_make_sure_not_resized_annorder(): + def f(n): + if n > 5: + result = None + else: + result = [1,2,3] + make_sure_not_resized(result) + interpret(f, [10]) + def test_mark_dict_non_null(): def f(): d = {"ac": "bx"} diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -12,6 +12,7 @@ _store_digit, _mask_digit, InvalidEndiannessError, InvalidSignednessError) from rpython.rlib.rfloat import NAN from rpython.rtyper.test.test_llinterp import interpret +from rpython.translator.c.test.test_standalone import StandaloneTests class TestRLong(object): @@ -849,3 +850,17 @@ py.test.raises(InvalidSignednessError, i.tobytes, 3, 'little', signed=False) py.test.raises(OverflowError, i.tobytes, 2, 'little', signed=True) + +class TestTranslated(StandaloneTests): + + def test_gcc_4_9(self): + MIN = -sys.maxint-1 + + def entry_point(argv): + print rbigint.fromint(MIN+1)._digits + print rbigint.fromint(MIN)._digits + return 0 + + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data == '[%d]\n[0, 1]\n' % sys.maxint diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -220,8 +220,8 @@ from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) - def rtyper_makekey_ex(self, rtyper): - real_key = 
rtyper.makekey(self.s_real_obj) + def rtyper_makekey(self): + real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller _make_none_union("SomeControlledInstance", "obj.s_real_obj, obj.controller", globals()) diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,45 +1,15 @@ """ Code for annotating low-level thingies. """ -from types import MethodType from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, - SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, - s_None, s_Bool, UnionError, AnnotatorError, SomeBool) + SomeUnicodeCodePoint, SomeInteger, SomeImpossibleValue, + s_None, s_Bool, UnionError, AnnotatorError) from rpython.rtyper.lltypesystem import lltype, llmemory - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - def getattr(self, s_attr): - assert s_attr.is_constant() - assert isinstance(s_attr, SomeString) - assert s_attr.const in llmemory.supported_access_types - return SomeTypedAddressAccess( - llmemory.supported_access_types[s_attr.const]) - getattr.can_only_throw = [] - - def bool(self): - return s_Bool - -class SomeTypedAddressAccess(SomeObject): - """This class is used to annotate the intermediate value that - appears in expressions of the form: - addr.signed[offset] and addr.signed[offset] = value - """ - - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False +from rpython.rtyper.lltypesystem.lltype import SomePtr +from rpython.rtyper.lltypesystem.llmemory import ( + SomeAddress, SomeTypedAddressAccess) class __extend__(pairtype(SomeAddress, SomeAddress)): @@ -98,69 +68,6 @@ raise UnionError(s_obj, s_addr) -class SomePtr(SomeObject): - 
knowntype = lltype._ptr - immutable = True - - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - def getattr(self, s_attr): - from rpython.annotator.bookkeeper import getbookkeeper - if not s_attr.is_constant(): - raise AnnotatorError("getattr on ptr %r with non-constant " - "field-name" % self.ll_ptrtype) - example = self.ll_ptrtype._example() - try: - v = example._lookup_adtmeth(s_attr.const) - except AttributeError: - v = getattr(example, s_attr.const) - return ll_to_annotation(v) - else: - if isinstance(v, MethodType): - ll_ptrtype = lltype.typeOf(v.im_self) - assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) - return SomeLLADTMeth(ll_ptrtype, v.im_func) - return getbookkeeper().immutablevalue(v) - getattr.can_only_throw = [] - - def len(self): - from rpython.annotator.bookkeeper import getbookkeeper - length = self.ll_ptrtype._example()._fixedlength() - if length is None: - return SomeObject.len(self) - else: - return getbookkeeper().immutablevalue(length) - - def setattr(self, s_attr, s_value): # just doing checking - if not s_attr.is_constant(): - raise AnnotatorError("setattr on ptr %r with non-constant " - "field-name" % self.ll_ptrtype) - example = self.ll_ptrtype._example() - if getattr(example, s_attr.const) is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - setattr(example, s_attr.const, v_lltype._defl()) - - def call(self, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level fn ptr") - info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] - v = self.ll_ptrtype._example()(*llargs) - return ll_to_annotation(v) - - def bool(self): - result = SomeBool() - if self.is_constant(): - result.const = bool(self.const) - return result - - class SomeInteriorPtr(SomePtr): def __init__(self, 
ll_ptrtype): From noreply at buildbot.pypy.org Mon May 19 04:40:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 19 May 2014 04:40:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Use with statement here instead of closing the file manually. Message-ID: <20140519024001.2DC001C01E8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r71580:5752672ddec8 Date: 2014-05-19 04:39 +0200 http://bitbucket.org/pypy/pypy/changeset/5752672ddec8/ Log: Use with statement here instead of closing the file manually. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -608,9 +608,8 @@ python_startup = readenv and os.getenv('PYTHONSTARTUP') if python_startup: try: - f = open(python_startup) - startup = f.read() - f.close() + with open(python_startup) as f: + startup = f.read() except IOError as e: print >> sys.stderr, "Could not open PYTHONSTARTUP" print >> sys.stderr, "IOError:", e From noreply at buildbot.pypy.org Mon May 19 10:29:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 May 2014 10:29:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix for 3777204fff8e Message-ID: <20140519082913.B455F1C3331@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71581:7c24973aa476 Date: 2014-05-19 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/7c24973aa476/ Log: Translation fix for 3777204fff8e diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -209,11 +209,13 @@ while size > 0: # "peeks" on the underlying stream to see how many chars # we can safely read without reading past an end-of-line - peeked = stream.peek() - pn = peeked.find("\n", 0, size) + startindex, peeked = stream.peek() + assert 0 <= startindex <= len(peeked) + endindex = startindex + size + pn = peeked.find("\n", startindex, 
endindex) if pn < 0: - pn = min(size-1, len(peeked)) - c = stream.read(pn + 1) + pn = min(endindex - 1, len(peeked)) + c = stream.read(pn - startindex + 1) if not c: break result.append(c) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -554,7 +554,7 @@ else: difpos = offset if -self.pos <= difpos <= currentsize: - self.pos += difpos + self.pos += intmask(difpos) return if whence == 1: offset -= currentsize From noreply at buildbot.pypy.org Mon May 19 10:38:44 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Mon, 19 May 2014 10:38:44 +0200 (CEST) Subject: [pypy-commit] pypy default: doc: update make.bat to look for Sphinx through Python Message-ID: <20140519083844.695671C3339@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r71582:d83990714816 Date: 2014-04-01 09:50 +0300 http://bitbucket.org/pypy/pypy/changeset/d83990714816/ Log: doc: update make.bat to look for Sphinx through Python diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat --- a/pypy/doc/make.bat +++ b/pypy/doc/make.bat @@ -2,11 +2,15 @@ REM Command file for Sphinx documentation -set SPHINXBUILD=sphinx-build +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help @@ -14,16 +18,25 @@ if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. 
latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled goto end ) @@ -33,8 +46,34 @@ goto end ) + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 2> nul +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + + if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end @@ -42,13 +81,23 @@ if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end @@ -56,6 +105,7 @@ if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end @@ -63,6 +113,7 @@ if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. @@ -71,6 +122,7 @@ if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: @@ -80,15 +132,85 @@ goto end ) +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. 
+ goto end +) + if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end @@ -96,6 +218,7 @@ if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. 
@@ -104,10 +227,27 @@ if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. + goto end +) + :end From noreply at buildbot.pypy.org Mon May 19 10:38:45 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Mon, 19 May 2014 10:38:45 +0200 (CEST) Subject: [pypy-commit] pypy default: doc: update Makefile to look for Sphinx through Python Message-ID: <20140519083845.A3E991C3339@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r71583:eb3a520a832f Date: 2014-04-01 12:21 +0300 http://bitbucket.org/pypy/pypy/changeset/eb3a520a832f/ Log: doc: update Makefile to look for Sphinx through Python diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,29 +7,47 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to 
make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: # python config/generate.py #readthedocs will not run this Makefile @@ -43,6 +61,12 @@ @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @@ -72,19 +96,74 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." 
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." man: # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man" + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
changes: # python config/generate.py #readthedocs will not run this Makefile @@ -104,3 +183,15 @@ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +xml: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + # python config/generate.py #readthedocs will not run this Makefile + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." From noreply at buildbot.pypy.org Mon May 19 10:38:46 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Mon, 19 May 2014 10:38:46 +0200 (CEST) Subject: [pypy-commit] pypy default: doc: Makefile - config/generate.py call migrated to pypyconfig extension Message-ID: <20140519083846.E505C1C3339@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r71584:41dfdac4d6b2 Date: 2014-04-01 17:36 +0300 http://bitbucket.org/pypy/pypy/changeset/41dfdac4d6b2/ Log: doc: Makefile - config/generate.py call migrated to pypyconfig extension diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -50,44 +50,37 @@ rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -97,7 +90,6 @@ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" devhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @@ -107,13 +99,11 @@ @echo "# devhelp" epub: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @@ -121,7 +111,6 @@ "(use \`make latexpdf' here to do that automatically)." 
latexpdf: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @@ -139,13 +128,11 @@ @echo "Build finished. The text files are in $(BUILDDIR)/text." man: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @@ -153,45 +140,38 @@ "(use \`make info' here to do that automatically)." info: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." 
doctest: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -22,7 +22,9 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', + 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] From noreply at buildbot.pypy.org Mon May 19 10:38:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 May 2014 10:38:48 +0200 (CEST) Subject: [pypy-commit] pypy default: pull request #220 Message-ID: <20140519083848.2AC2C1C3339@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71585:d3f93da35ed4 Date: 2014-05-19 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/d3f93da35ed4/ Log: pull request #220 diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,63 +7,80 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make 
pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -72,35 +89,89 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." man: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man" + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." 
+ +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -22,7 +22,9 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', + 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat --- a/pypy/doc/make.bat +++ b/pypy/doc/make.bat @@ -2,11 +2,15 @@ REM Command file for Sphinx documentation -set SPHINXBUILD=sphinx-build +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help @@ -14,16 +18,25 @@ if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. 
devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled goto end ) @@ -33,8 +46,34 @@ goto end ) + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 2> nul +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + + if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end @@ -42,13 +81,23 @@ if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 
+ goto end +) + if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end @@ -56,6 +105,7 @@ if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end @@ -63,6 +113,7 @@ if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. @@ -71,6 +122,7 @@ if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: @@ -80,15 +132,85 @@ goto end ) +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. 
+ goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end @@ -96,6 +218,7 @@ if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. @@ -104,10 +227,27 @@ if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
+ goto end +) + :end From noreply at buildbot.pypy.org Mon May 19 10:40:11 2014 From: noreply at buildbot.pypy.org (techtonik) Date: Mon, 19 May 2014 10:40:11 +0200 (CEST) Subject: [pypy-commit] pypy default: docs: Use read the docs theme if available Message-ID: <20140519084011.47E2A1C02D9@cobra.cs.uni-duesseldorf.de> Author: anatoly techtonik Branch: Changeset: r71586:07e82d634df6 Date: 2014-04-17 17:08 +0300 http://bitbucket.org/pypy/pypy/changeset/07e82d634df6/ Log: docs: Use read the docs theme if available diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -18,6 +18,24 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions @@ -91,7 +109,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the From noreply at buildbot.pypy.org Mon May 19 10:40:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 May 2014 10:40:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in techtonik/pypy (pull request #230) Message-ID: <20140519084012.92A5C1C02D9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71587:250638982fbe Date: 2014-05-19 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/250638982fbe/ Log: Merged in techtonik/pypy (pull request #230) docs: Use read the docs theme if available diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -18,6 +18,24 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions @@ -93,7 +111,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the From noreply at buildbot.pypy.org Mon May 19 14:39:45 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 19 May 2014 14:39:45 +0200 (CEST) Subject: [pypy-commit] pypy default: this XXX is no longer needed, __debug__ works correctly nowadays, as the Message-ID: <20140519123945.988FB1C1106@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r71588:91bee71be820 Date: 2014-04-25 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/91bee71be820/ Log: this XXX is no longer needed, __debug__ works correctly nowadays, as the __pypy__ module sets diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,7 +33,7 @@ interpleveldefs = { # constants - '__debug__' : '(space.w_True)', # XXX + '__debug__' : '(space.w_True)', 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', From noreply at buildbot.pypy.org Mon May 19 14:39:46 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 19 May 2014 14:39:46 +0200 (CEST) Subject: [pypy-commit] pypy default: add another signature to a string method to fix a random translation error Message-ID: <20140519123946.D13341C1106@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r71589:fbe93314fa42 Date: 2014-05-19 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/fbe93314fa42/ Log: add another signature to a string method to fix a random translation error diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -834,6 +834,7 @@ def ll_stringslice_startonly(s1, start): return LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if 
jit.we_are_jitted(): if stop > len(s1.chars): From noreply at buildbot.pypy.org Mon May 19 14:39:47 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 19 May 2014 14:39:47 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20140519123947.F1A601C1106@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r71590:dca4d7417953 Date: 2014-05-19 13:39 +0100 http://bitbucket.org/pypy/pypy/changeset/dca4d7417953/ Log: merge diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,7 +33,7 @@ interpleveldefs = { # constants - '__debug__' : '(space.w_True)', # XXX + '__debug__' : '(space.w_True)', 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', From noreply at buildbot.pypy.org Mon May 19 15:31:30 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 2014 15:31:30 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: initial sketch of API Message-ID: <20140519133130.E2A621C3382@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1211:38e68ec4f6b5 Date: 2014-05-19 15:32 +0200 http://bitbucket.org/pypy/stmgc/changeset/38e68ec4f6b5/ Log: initial sketch of API diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,8 +40,13 @@ #endif } -void _stm_write_slowpath(object_t *obj) +void _stm_write_slowpath(object_t *obj, uintptr_t offset) { + assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), + offset == 0)); + if (offset) + abort(); + assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -35,6 +35,8 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END +#define CARD_SIZE _STM_CARD_SIZE + enum /* stm_flags */ { /* This 
flag is set on non-nursery objects. It forces stm_write() to call _stm_write_slowpath(). @@ -54,6 +56,9 @@ after the object. */ GCFLAG_HAS_SHADOW = 0x04, + /* Set on objects after allocation that may use card marking */ + GCFLAG_HAS_CARDS = _STM_GCFLAG_HAS_CARDS, + /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the "overflow objects" from the current transaction among all old @@ -61,7 +66,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10 /* must be last */ }; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -83,6 +83,7 @@ { /* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); + assert(CARD_SIZE > 0 && CARD_SIZE % 16 == 0); assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -106,7 +106,7 @@ /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ -void _stm_write_slowpath(object_t *); +void _stm_write_slowpath(object_t *, uintptr_t); object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); @@ -143,6 +143,8 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define _STM_GCFLAG_HAS_CARDS 0x08 +#define _STM_CARD_SIZE 16 /* modulo 16 == 0! 
*/ #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) @@ -210,7 +212,24 @@ static inline void stm_write(object_t *obj) { if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) - _stm_write_slowpath(obj); + _stm_write_slowpath(obj, 0); +} + +/* The following are barriers that work on the granularity of CARD_SIZE. + They can only be used on objects one called stm_use_cards() on. */ +__attribute__((always_inline)) +static inline void stm_read_card(object_t *obj, uintptr_t offset) +{ + OPT_ASSERT(obj->stm_flags & _STM_GCFLAG_HAS_CARDS); + ((stm_read_marker_t *)(((uintptr_t)obj + offset) >> 4))->rm = + STM_SEGMENT->transaction_read_version; +} +__attribute__((always_inline)) +static inline void stm_write_card(object_t *obj, uintptr_t offset) +{ + OPT_ASSERT(obj->stm_flags & _STM_GCFLAG_HAS_CARDS); + if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) + _stm_write_slowpath(obj, offset); } /* Must be provided by the user of this library. @@ -248,6 +267,16 @@ return (object_t *)p; } +/* directly after allocation one can enable card marking for any + kind of object with stm_use_cards(obj). This enables the use + of stm_write/read_card() barriers that do more fine-grained + conflict detection and garbage collection. */ +__attribute__((always_inline)) +static inline void stm_use_cards(object_t* o) +{ + o->stm_flags |= _STM_GCFLAG_HAS_CARDS; +} + /* Allocate a weakref object. Weakref objects have a reference to an object at the byte-offset stmcb_size_rounded_up(obj) - sizeof(void*) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,6 +12,8 @@ #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... +#define _STM_GCFLAG_HAS_CARDS ... +#define _STM_CARD_SIZE ... #define STM_STACK_MARKER_NEW ... #define STM_STACK_MARKER_OLD ... 
@@ -41,6 +43,11 @@ object_t *stm_allocate_weakref(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); +void stm_use_cards(object_t* o); +void stm_read_card(object_t *obj, uintptr_t offset); +/*void stm_write_card(); use _checked_stm_write_card() instead */ + + void stm_setup(void); void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); @@ -49,6 +56,7 @@ object_t *stm_setup_prebuilt_weakref(object_t *); bool _checked_stm_write(object_t *obj); +bool _checked_stm_write_card(object_t *obj, uintptr_t offset); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); char *_stm_real_address(object_t *obj); @@ -181,6 +189,10 @@ CHECKED(stm_write(object)); } +bool _checked_stm_write_card(object_t *object, uintptr_t offset) { + CHECKED(stm_write_card(object, offset)); +} + bool _check_stop_safe_point(void) { CHECKED(_stm_stop_safe_point()); } @@ -323,6 +335,8 @@ HDR = lib.SIZEOF_MYOBJ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER +GCFLAG_HAS_CARDS = lib._STM_GCFLAG_HAS_CARDS +CARD_SIZE = lib._STM_CARD_SIZE # 16b at least NB_SEGMENTS = lib.STM_NB_SEGMENTS @@ -335,20 +349,26 @@ def is_in_nursery(o): return lib.stm_can_move(o) -def stm_allocate_old(size): +def stm_allocate_old(size, use_cards=False): o = lib._stm_allocate_old(size) + if use_cards: + lib.stm_use_cards(o) tid = 42 + size lib._set_type_id(o, tid) return o -def stm_allocate_old_refs(n): +def stm_allocate_old_refs(n, use_cards=False): o = lib._stm_allocate_old(HDR + n * WORD) + if use_cards: + lib.stm_use_cards(o) tid = 421420 + n lib._set_type_id(o, tid) return o -def stm_allocate(size): +def stm_allocate(size, use_cards=False): o = lib.stm_allocate(size) + if use_cards: + lib.stm_use_cards(o) tid = 42 + size lib._set_type_id(o, tid) return o @@ -365,8 +385,10 @@ def stm_get_weakref(o): return lib._get_weakref(o) -def stm_allocate_refs(n): +def stm_allocate_refs(n, use_cards=False): o = lib.stm_allocate(HDR + n * WORD) + if 
use_cards: + lib.stm_use_cards(o) tid = 421420 + n lib._set_type_id(o, tid) return o @@ -395,10 +417,21 @@ def stm_read(o): lib.stm_read(o) +def stm_read_card(o, offset): + assert stm_get_flags(o) & GCFLAG_HAS_CARDS + assert offset < stm_get_obj_size(o) + lib.stm_read_card(o, offset) + def stm_write(o): if lib._checked_stm_write(o): raise Conflict() +def stm_write_card(o, offset): + assert stm_get_flags(o) & GCFLAG_HAS_CARDS + assert offset < stm_get_obj_size(o) + if lib._checked_stm_write_card(o, offset): + raise Conflict() + def stm_was_read(o): return lib._stm_was_read(o) From noreply at buildbot.pypy.org Mon May 19 17:38:18 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 2014 17:38:18 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: expand the tests (still failing) Message-ID: <20140519153818.A14DC1C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1212:9759aad4236b Date: 2014-05-19 15:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/9759aad4236b/ Log: expand the tests (still failing) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -44,8 +44,6 @@ { assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), offset == 0)); - if (offset) - abort(); assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); @@ -73,7 +71,7 @@ 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. 
*/ - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + uintptr_t lock_idx = get_write_lock_idx(obj); uint8_t lock_num = STM_PSEGMENT->write_lock_num; assert(lock_idx < sizeof(write_locks)); retry: diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -220,6 +220,10 @@ #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) +static inline uintptr_t get_write_lock_idx(object_t *obj) { + return (((uintptr_t)obj) >> 4) - WRITELOCK_START; +} + static inline char *get_segment_base(long segment_num) { return stm_object_pages + segment_num * (NB_PAGES * 4096UL); } @@ -252,6 +256,15 @@ return rm == other_transaction_read_version; } +static inline bool was_read_remote_card(char *base, object_t *obj, uintptr_t offset, + uint8_t other_transaction_read_version) +{ + uint8_t rm = ((struct stm_read_marker_s *) + (base + (((uintptr_t)obj + offset) >> 4)))->rm; + assert(rm <= other_transaction_read_version); + return rm == other_transaction_read_version; +} + static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into a %gs-prefixed address and that may otherwise be replaced with diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -40,6 +40,18 @@ return (obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) == 0; } +bool _stm_was_read_card(object_t *obj, uintptr_t offset) +{ + return was_read_remote_card( + STM_SEGMENT->segment_base, obj, offset, + STM_SEGMENT->transaction_read_version); +} + +bool _stm_was_written_card(object_t *obj, uintptr_t offset) +{ + return write_locks[get_write_lock_idx((object_t*)((uintptr_t)obj + offset))]; +} + #ifdef STM_TESTS uintptr_t _stm_get_private_page(uintptr_t pagenum) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -120,6 +120,8 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +bool _stm_was_read_card(object_t *obj, uintptr_t offset); +bool 
_stm_was_written_card(object_t *obj, uintptr_t offset); uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -58,7 +58,9 @@ bool _checked_stm_write(object_t *obj); bool _checked_stm_write_card(object_t *obj, uintptr_t offset); bool _stm_was_read(object_t *obj); +bool _stm_was_read_card(object_t *obj, uintptr_t offset); bool _stm_was_written(object_t *obj); +bool _stm_was_written_card(object_t *obj, uintptr_t offset); char *_stm_real_address(object_t *obj); char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); @@ -438,6 +440,12 @@ def stm_was_written(o): return lib._stm_was_written(o) +def stm_was_read_card(o, offset): + return lib._stm_was_read_card(o, offset) + +def stm_was_written_card(o, offset): + return lib._stm_was_written_card(o, offset) + def stm_start_safe_point(): lib._stm_start_safe_point() From noreply at buildbot.pypy.org Mon May 19 17:38:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 2014 17:38:19 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: wip Message-ID: <20140519153819.B08561C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1213:aeafbbe2bb03 Date: 2014-05-19 16:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/aeafbbe2bb03/ Log: wip diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,6 +40,39 @@ #endif } +static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t offset) +{ + /* is this an object from the same transaction, outside the nursery? 
*/ + if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) + == STM_PSEGMENT->overflow_number) { + + assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); + + if (!offset) { + /* no card to be marked */ + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + } else { + /* don't remove GCFLAG_WRITE_BARRIER because we need to be + here for every card to mark */ + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { + /* not yet in the list */ + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + obj->stm_flags |= GCFLAG_CARDS_SET; + } + + /* just acquire the corresponding lock for the next minor_collection + to know what may have changed. only we know about this object: */ + uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj + offset); + assert(!write_locks[lock_idx]); + write_locks[lock_idx] = STM_PSEGMENT->write_lock_num; + } + return true; + } + return false; +} + void _stm_write_slowpath(object_t *obj, uintptr_t offset) { assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), @@ -49,16 +82,8 @@ assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - /* is this an object from the same transaction, outside the nursery? */ - if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == - STM_PSEGMENT->overflow_number) { - - dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + if (_stm_write_slowpath_overflow_objs(obj, offset)) return; - } /* do a read-barrier now. Note that this must occur before the safepoints that may be issued in write_write_contention_management(). */ @@ -71,7 +96,7 @@ 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). 
We'll detect this case by finding that we already own the write-lock. */ - uintptr_t lock_idx = get_write_lock_idx(obj); + uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj); uint8_t lock_num = STM_PSEGMENT->write_lock_num; assert(lock_idx < sizeof(write_locks)); retry: diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -58,6 +58,8 @@ /* Set on objects after allocation that may use card marking */ GCFLAG_HAS_CARDS = _STM_GCFLAG_HAS_CARDS, + /* Set on objects that have at least one card marked */ + GCFLAG_CARDS_SET = _STM_GCFLAG_CARDS_SET, /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the @@ -66,7 +68,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. */ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x20 /* must be last */ }; @@ -220,8 +222,8 @@ #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) -static inline uintptr_t get_write_lock_idx(object_t *obj) { - return (((uintptr_t)obj) >> 4) - WRITELOCK_START; +static inline uintptr_t get_write_lock_idx(uintptr_t obj) { + return (obj >> 4) - WRITELOCK_START; } static inline char *get_segment_base(long segment_num) { diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -47,9 +47,9 @@ STM_SEGMENT->transaction_read_version); } -bool _stm_was_written_card(object_t *obj, uintptr_t offset) +bool _stm_was_written_card(object_t *obj) { - return write_locks[get_write_lock_idx((object_t*)((uintptr_t)obj + offset))]; + return obj->stm_flags & _STM_GCFLAG_CARDS_SET; } #ifdef STM_TESTS diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -84,6 +84,7 @@ /* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); assert(CARD_SIZE > 0 && CARD_SIZE % 16 == 0); + assert(CARD_SIZE == 
16); /* actually, it is hardcoded in some places right now.. */ assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -121,7 +121,7 @@ bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); bool _stm_was_read_card(object_t *obj, uintptr_t offset); -bool _stm_was_written_card(object_t *obj, uintptr_t offset); +bool _stm_was_written_card(object_t *obj); uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); @@ -146,6 +146,7 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_GCFLAG_HAS_CARDS 0x08 +#define _STM_GCFLAG_CARDS_SET 0x10 #define _STM_CARD_SIZE 16 /* modulo 16 == 0! */ #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -60,7 +60,7 @@ bool _stm_was_read(object_t *obj); bool _stm_was_read_card(object_t *obj, uintptr_t offset); bool _stm_was_written(object_t *obj); -bool _stm_was_written_card(object_t *obj, uintptr_t offset); +bool _stm_was_written_card(object_t *obj); char *_stm_real_address(object_t *obj); char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); @@ -443,8 +443,8 @@ def stm_was_read_card(o, offset): return lib._stm_was_read_card(o, offset) -def stm_was_written_card(o, offset): - return lib._stm_was_written_card(o, offset) +def stm_was_written_card(o): + return lib._stm_was_written_card(o) def stm_start_safe_point(): From noreply at buildbot.pypy.org Mon May 19 17:38:20 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 2014 17:38:20 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: introduce old_objects_with_cards; 2 tests pass Message-ID: 
<20140519153820.C8AFD1C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1214:e6dc7f070560 Date: 2014-05-19 16:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/e6dc7f070560/ Log: introduce old_objects_with_cards; 2 tests pass diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -58,7 +58,7 @@ here for every card to mark */ if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { /* not yet in the list */ - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); obj->stm_flags |= GCFLAG_CARDS_SET; } @@ -68,8 +68,12 @@ assert(!write_locks[lock_idx]); write_locks[lock_idx] = STM_PSEGMENT->write_lock_num; } + + /* We don't need to do anything in the STM part of the WB slowpath: */ return true; } + + /* continue in STM part with no-overflow object */ return false; } @@ -512,6 +516,7 @@ /* reset these lists to NULL for the next transaction */ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); + LIST_FREE(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); timing_end_transaction(attribute_to); @@ -690,6 +695,7 @@ /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); + LIST_FREE(pseg->old_objects_with_cards); LIST_FREE(pseg->large_overflow_objects); list_clear(pseg->young_weakrefs); } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -103,6 +103,10 @@ understood as meaning implicitly "this is the same as 'modified_old_objects'". */ struct list_s *objects_pointing_to_nursery; + /* Like objects_pointing_to_nursery it holds the old objects that + we did a stm_write_card() on. Objects can be in both lists. + It is NULL iff objects_pointing_to_nursery is NULL. */ + struct list_s *old_objects_with_cards; /* List of all large, overflowed objects. Only non-NULL after the current transaction spanned a minor collection. 
*/ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -332,6 +332,7 @@ uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + STM_PSEGMENT->old_objects_with_cards = list_create(); /* See the doc of 'objects_pointing_to_nursery': if it is NULL, then it is implicitly understood to be equal to diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -119,6 +119,7 @@ pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; + pr->old_objects_with_cards = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); pr->modified_old_objects_markers = list_create(); @@ -158,6 +159,7 @@ for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->objects_pointing_to_nursery == NULL); + assert(pr->old_objects_with_cards == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); list_free(pr->modified_old_objects_markers); From noreply at buildbot.pypy.org Mon May 19 17:38:21 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 2014 17:38:21 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: wip Message-ID: <20140519153821.DA65E1C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1215:cde500a7d39e Date: 2014-05-19 17:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/cde500a7d39e/ Log: wip diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -79,9 +79,7 @@ void _stm_write_slowpath(object_t *obj, uintptr_t offset) { - assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), - offset == 0)); - + assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), offset == 0)); assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); @@ -100,16 +98,18 @@ 
'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. */ - uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj); + bool lock_whole = offset == 0; + uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); + //uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj + offset); uint8_t lock_num = STM_PSEGMENT->write_lock_num; - assert(lock_idx < sizeof(write_locks)); + assert(base_lock_idx < sizeof(write_locks)); retry: - if (write_locks[lock_idx] == 0) { + if (write_locks[base_lock_idx] == 0) { /* A lock to prevent reading garbage from lookup_other_thread_recorded_marker() */ acquire_marker_lock(STM_SEGMENT->segment_base); - if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[base_lock_idx], 0, lock_num))) { release_marker_lock(STM_SEGMENT->segment_base); goto retry; @@ -159,7 +159,7 @@ } } } - else if (write_locks[lock_idx] == lock_num) { + else if (write_locks[base_lock_idx] == lock_num) { OPT_ASSERT(STM_PSEGMENT->objects_pointing_to_nursery != NULL); #ifdef STM_TESTS bool found = false; @@ -171,33 +171,47 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx, obj); + write_write_contention_management(base_lock_idx, obj); goto retry; } /* A common case for write_locks[] that was either 0 or lock_num: - we need to add the object to 'objects_pointing_to_nursery' - if there is such a list. */ - if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { - dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + we need to add the object to the appropriate list if there is one. 
*/ + if (lock_whole) { + if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { + dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + } + + /* check that we really have a private page */ + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj) / 4096)); + /* check that so far all copies of the object have the flag */ + check_flag_write_barrier(obj); + + /* remove GCFLAG_WRITE_BARRIER if we succeeded in getting the base + write-lock (not for card marking). */ + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + + /* for sanity, check again that all other segment copies of this + object still have the flag (so privatization worked) */ + check_flag_write_barrier(obj); + + } else { /* card marking case */ + + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { + /* not yet in the list (may enter here multiple times) */ + if (STM_PSEGMENT->old_objects_with_cards != NULL) { + LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); + } + obj->stm_flags |= GCFLAG_CARDS_SET; + } + + /* check that we really have a private page */ + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj + offset) / 4096)); } - - /* check that we really have a private page */ - assert(is_private_page(STM_SEGMENT->segment_num, - ((uintptr_t)obj) / 4096)); - - /* check that so far all copies of the object have the flag */ - check_flag_write_barrier(obj); - - /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in - getting the write-lock */ - assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - - /* for sanity, check again that all other segment copies of this - object still have the flag (so privatization worked) */ - check_flag_write_barrier(obj); } static void reset_transaction_read_version(void) From noreply at buildbot.pypy.org Mon May 19 17:38:22 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 19 May 
2014 17:38:22 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: make the 3 simple tests pass Message-ID: <20140519153822.D7DE41C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1216:1ce81b961157 Date: 2014-05-19 17:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/1ce81b961157/ Log: make the 3 simple tests pass diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,19 +183,42 @@ minor_trace_if_young(&tl->thread_local_obj); } +static void minor_trace_if_young_cards(object_t **pobj) +{ + /* XXX: maybe add a specialised stmcb_trace_cards() */ + object_t *obj = *pobj; + if (write_locks[get_write_lock_idx((uintptr_t)obj)]) + minor_trace_if_young(pobj); +} + static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); - /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ - assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); - obj->stm_flags |= GCFLAG_WRITE_BARRIER; + /* If WRITE_BARRIER: CARDS_SET */ + /* If not WRITE_BARRIER: maybe CARDS_SET */ + assert(IMPLY(obj->stm_flags & GCFLAG_WRITE_BARRIER, + obj->stm_flags & GCFLAG_CARDS_SET)); + if (!(obj->stm_flags & GCFLAG_WRITE_BARRIER)) { + /* do normal full trace, even if also card-marked */ + obj->stm_flags |= GCFLAG_WRITE_BARRIER; - /* Trace the 'obj' to replace pointers to nursery with pointers - outside the nursery, possibly forcing nursery objects out and - adding them to 'objects_pointing_to_nursery' as well. */ - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. 
*/ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + } else { + /* only trace cards */ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); + } + + /* clear the CARDS_SET, but not the real cards since they are + still needed by STM conflict detection + XXX: maybe separate them since we now have to also trace all + these cards again in the next minor_collection */ + obj->stm_flags &= ~GCFLAG_CARDS_SET; } static void collect_oldrefs_to_nursery(void) From noreply at buildbot.pypy.org Mon May 19 21:36:51 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 21:36:51 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotators for the base case of comparison operators Message-ID: <20140519193651.2A7701D2D1C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71591:8fe91928ab37 Date: 2014-05-19 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/8fe91928ab37/ Log: register annotators for the base case of comparison operators diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -58,6 +58,18 @@ r.set_knowntypedata(knowntypedata) return r +def _make_cmp_annotator_default(cmp_op): + @cmp_op.register(SomeObject, SomeObject) + def default_annotate(obj1, obj2): + s_1, s_2 = obj1.ann, obj2.ann + if s_1.is_immutable_constant() and s_2.is_immutable_constant(): + return immutablevalue(cmp_op.pyfunc(s_1.const, s_2.const)) + else: + return s_Bool + +for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: + _make_cmp_annotator_default(cmp_op) + class __extend__(pairtype(SomeObject, SomeObject)): def union((obj1, obj2)): @@ -86,42 +98,6 @@ inplace_floordiv.can_only_throw = [ZeroDivisionError] inplace_mod.can_only_throw = [ZeroDivisionError] - def lt((obj1, obj2)): - if 
obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const < obj2.const) - else: - return s_Bool - - def le((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const <= obj2.const) - else: - return s_Bool - - def eq((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const == obj2.const) - else: - return s_Bool - - def ne((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const != obj2.const) - else: - return s_Bool - - def gt((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const > obj2.const) - else: - return s_Bool - - def ge((obj1, obj2)): - if obj1.is_immutable_constant() and obj2.is_immutable_constant(): - return immutablevalue(obj1.const >= obj2.const) - else: - return s_Bool - def cmp((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) From noreply at buildbot.pypy.org Mon May 19 21:36:52 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 21:36:52 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotators for comparisons on integers Message-ID: <20140519193652.57C951D2D1C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71592:51c01e84ed87 Date: 2014-05-19 19:26 +0100 http://bitbucket.org/pypy/pypy/changeset/51c01e84ed87/ Log: register annotators for comparisons on integers diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -3,7 +3,6 @@ """ import py -import operator from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, @@ -243,10 
+242,14 @@ return SomeInteger(nonneg=int1.nonneg, knowntype=int1.knowntype) rshift.can_only_throw = [] - def _compare_helper((int1, int2), opname, operation): + +def _make_cmp_annotator_int(cmp_op): + @cmp_op.register(SomeInteger, SomeInteger) + def _compare_helper(int1, int2): r = SomeBool() - if int1.is_immutable_constant() and int2.is_immutable_constant(): - r.const = operation(int1.const, int2.const) + s_int1, s_int2 = int1.ann, int2.ann + if s_int1.is_immutable_constant() and s_int2.is_immutable_constant(): + r.const = cmp_op.pyfunc(s_int1.const, s_int2.const) # # The rest of the code propagates nonneg information between # the two arguments. @@ -258,45 +261,38 @@ # nonneg then "assert x>=y" will let the annotator know that # x is nonneg too, but it will not work if y is unsigned. # - if not (rarithmetic.signedtype(int1.knowntype) and - rarithmetic.signedtype(int2.knowntype)): + if not (rarithmetic.signedtype(s_int1.knowntype) and + rarithmetic.signedtype(s_int2.knowntype)): return r knowntypedata = {} - op = getbookkeeper()._find_current_op(opname=opname, arity=2) - def tointtype(int0): - if int0.knowntype is bool: + def tointtype(s_int0): + if s_int0.knowntype is bool: return int - return int0.knowntype - if int1.nonneg and isinstance(op.args[1], Variable): - case = opname in ('lt', 'le', 'eq') - - add_knowntypedata(knowntypedata, case, [op.args[1]], - SomeInteger(nonneg=True, knowntype=tointtype(int2))) - if int2.nonneg and isinstance(op.args[0], Variable): - case = opname in ('gt', 'ge', 'eq') - add_knowntypedata(knowntypedata, case, [op.args[0]], - SomeInteger(nonneg=True, knowntype=tointtype(int1))) + return s_int0.knowntype + if s_int1.nonneg and isinstance(int2.value, Variable): + case = cmp_op.opname in ('lt', 'le', 'eq') + add_knowntypedata(knowntypedata, case, [int2.value], + SomeInteger(nonneg=True, knowntype=tointtype(s_int2))) + if s_int2.nonneg and isinstance(int1.value, Variable): + case = cmp_op.opname in ('gt', 'ge', 'eq') + 
add_knowntypedata(knowntypedata, case, [int1.value], + SomeInteger(nonneg=True, knowntype=tointtype(s_int1))) r.set_knowntypedata(knowntypedata) # a special case for 'x < 0' or 'x >= 0', # where 0 is a flow graph Constant # (in this case we are sure that it cannot become a r_uint later) - if (isinstance(op.args[1], Constant) and - type(op.args[1].value) is int and # filter out Symbolics - op.args[1].value == 0): - if int1.nonneg: - if opname == 'lt': + if (isinstance(int2.value, Constant) and + type(int2.value.value) is int and # filter out Symbolics + int2.value.value == 0): + if s_int1.nonneg: + if cmp_op.opname == 'lt': r.const = False - if opname == 'ge': + if cmp_op.opname == 'ge': r.const = True return r - def lt(intint): return intint._compare_helper('lt', operator.lt) - def le(intint): return intint._compare_helper('le', operator.le) - def eq(intint): return intint._compare_helper('eq', operator.eq) - def ne(intint): return intint._compare_helper('ne', operator.ne) - def gt(intint): return intint._compare_helper('gt', operator.gt) - def ge(intint): return intint._compare_helper('ge', operator.ge) - +for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: + _make_cmp_annotator_int(cmp_op) class __extend__(pairtype(SomeBool, SomeBool)): From noreply at buildbot.pypy.org Mon May 19 21:43:33 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 19 May 2014 21:43:33 +0200 (CEST) Subject: [pypy-commit] pypy default: kill unused SomeBuiltinMethod Message-ID: <20140519194333.E5C0F1C02D9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71593:583fddd89a2e Date: 2014-05-19 20:43 +0100 http://bitbucket.org/pypy/pypy/changeset/583fddd89a2e/ Log: kill unused SomeBuiltinMethod diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -533,12 +533,6 @@ return False -class SomeBuiltinMethod(SomeBuiltin): - """ Stands for a built-in method which has got special meaning - 
""" - knowntype = MethodType - - class SomeImpossibleValue(SomeObject): """The empty set. Instances are placeholders for objects that will never show up at run-time, e.g. elements of an empty list.""" From noreply at buildbot.pypy.org Tue May 20 02:41:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 20 May 2014 02:41:31 +0200 (CEST) Subject: [pypy-commit] pypy py3k: attempt to get more debug output Message-ID: <20140520004131.8BF281C02D9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71595:8ca6146f84f2 Date: 2014-05-19 17:40 -0700 http://bitbucket.org/pypy/pypy/changeset/8ca6146f84f2/ Log: attempt to get more debug output diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -116,6 +116,7 @@ except BaseException as e: try: + initstdio() stderr = sys.stderr print('Error calling sys.excepthook:', file=stderr) originalexcepthook(type(e), e, e.__traceback__) From noreply at buildbot.pypy.org Tue May 20 03:13:08 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 May 2014 03:13:08 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix. Message-ID: <20140520011308.1E2DB1C02D9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71596:7a33e5f4c334 Date: 2014-05-20 02:59 +0200 http://bitbucket.org/pypy/pypy/changeset/7a33e5f4c334/ Log: Fix. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -122,7 +122,7 @@ def descr_len(self, space): self._check_released(space) - return space.wrap(self.buf.getlength()) + return space.wrap(self.getlength()) def w_get_format(self, space): self._check_released(space) From noreply at buildbot.pypy.org Tue May 20 03:30:03 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 May 2014 03:30:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Make lib-python tests work when pypy is translated without cpyext. Message-ID: <20140520013003.7B4E01D2CEE@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71597:627832fff626 Date: 2014-05-20 03:29 +0200 http://bitbucket.org/pypy/pypy/changeset/627832fff626/ Log: Make lib-python tests work when pypy is translated without cpyext. diff --git a/lib-python/3/test/support.py b/lib-python/3/test/support.py --- a/lib-python/3/test/support.py +++ b/lib-python/3/test/support.py @@ -25,7 +25,6 @@ import logging.handlers import struct import tempfile -import _testcapi try: import _thread, threading @@ -1145,6 +1144,7 @@ _TPFLAGS_HEAPTYPE = 1<<9 def check_sizeof(test, o, size): + import _testcapi result = sys.getsizeof(o) # add GC header size if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\ From noreply at buildbot.pypy.org Tue May 20 04:46:28 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 20 May 2014 04:46:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Split SomeBuiltinMethod from SomeBuiltin Message-ID: <20140520024628.9EC711C11FD@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71598:d11d5d0dc890 Date: 2014-05-20 02:28 +0100 http://bitbucket.org/pypy/pypy/changeset/d11d5d0dc890/ Log: Split SomeBuiltinMethod from SomeBuiltin diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- 
a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -9,7 +9,7 @@ SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, - SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, + SomeBuiltinMethod, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, @@ -730,15 +730,14 @@ return SomeIterator(s_cont, *iter1.variant) -class __extend__(pairtype(SomeBuiltin, SomeBuiltin)): - +class __extend__(pairtype(SomeBuiltinMethod, SomeBuiltinMethod)): def union((bltn1, bltn2)): if (bltn1.analyser != bltn2.analyser or - bltn1.methodname != bltn2.methodname or - bltn1.s_self is None or bltn2.s_self is None): + bltn1.methodname != bltn2.methodname): raise UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) - return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) + return SomeBuiltinMethod(bltn1.analyser, s_self, + methodname=bltn1.methodname) class __extend__(pairtype(SomePBC, SomePBC)): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -533,6 +533,20 @@ return False +class SomeBuiltinMethod(SomeBuiltin): + """ Stands for a built-in method which has got special meaning + """ + def __init__(self, analyser, s_self, methodname): + if isinstance(analyser, MethodType): + analyser = descriptor.InstanceMethod( + analyser.im_func, + analyser.im_self, + analyser.im_class) + self.analyser = analyser + self.s_self = s_self + self.methodname = methodname + + class SomeImpossibleValue(SomeObject): """The empty set. Instances are placeholders for objects that will never show up at run-time, e.g. 
elements of an empty list.""" diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -7,8 +7,8 @@ from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, - SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeType, s_ImpossibleValue, + SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, + SomeFloat, SomeIterator, SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue @@ -108,7 +108,7 @@ except AttributeError: return None else: - return SomeBuiltin(analyser, self, name) + return SomeBuiltinMethod(analyser, self, name) def getattr(self, s_attr): # get a SomeBuiltin if the SomeObject has diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -11,37 +11,32 @@ class __extend__(annmodel.SomeBuiltin): def rtyper_makerepr(self, rtyper): - if self.s_self is None: - # built-in function case - if not self.is_constant(): - raise TyperError("non-constant built-in function!") - return BuiltinFunctionRepr(self.const) - else: - # built-in method case - assert self.methodname is not None - result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname) - return result + if not self.is_constant(): + raise TyperError("non-constant built-in function!") + return BuiltinFunctionRepr(self.const) + def rtyper_makekey(self): - if self.s_self is None: - # built-in function case + const = getattr(self, 'const', None) + if extregistry.is_registered(const): + const = extregistry.lookup(const) + return self.__class__, const - const = getattr(self, 'const', None) +class 
__extend__(annmodel.SomeBuiltinMethod): + def rtyper_makerepr(self, rtyper): + assert self.methodname is not None + result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname) + return result - if extregistry.is_registered(const): - const = extregistry.lookup(const) - - return self.__class__, const - else: - # built-in method case - # NOTE: we hash by id of self.s_self here. This appears to be - # necessary because it ends up in hop.args_s[0] in the method call, - # and there is no telling what information the called - # rtype_method_xxx() will read from that hop.args_s[0]. - # See test_method_join in test_rbuiltin. - # There is no problem with self.s_self being garbage-collected and - # its id reused, because the BuiltinMethodRepr keeps a reference - # to it. - return (self.__class__, self.methodname, id(self.s_self)) + def rtyper_makekey(self): + # NOTE: we hash by id of self.s_self here. This appears to be + # necessary because it ends up in hop.args_s[0] in the method call, + # and there is no telling what information the called + # rtype_method_xxx() will read from that hop.args_s[0]. + # See test_method_join in test_rbuiltin. + # There is no problem with self.s_self being garbage-collected and + # its id reused, because the BuiltinMethodRepr keeps a reference + # to it. 
+ return (self.__class__, self.methodname, id(self.s_self)) def call_args_expand(hop, takes_kwds = True): hop = hop.copy() From noreply at buildbot.pypy.org Tue May 20 13:11:54 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 13:11:54 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: fix and add failing test Message-ID: <20140520111154.769B71C03C4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1217:73abd268f72c Date: 2014-05-20 10:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/73abd268f72c/ Log: fix and add failing test diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -186,23 +186,24 @@ static void minor_trace_if_young_cards(object_t **pobj) { /* XXX: maybe add a specialised stmcb_trace_cards() */ - object_t *obj = *pobj; - if (write_locks[get_write_lock_idx((uintptr_t)obj)]) + uintptr_t obj = (uintptr_t)((char*)pobj - STM_SEGMENT->segment_base); + if (write_locks[get_write_lock_idx(obj)]) { + dprintf(("minor_trace_if_young_cards: trace %p\n", *pobj)); minor_trace_if_young(pobj); + } } static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); - /* If WRITE_BARRIER: CARDS_SET */ - /* If not WRITE_BARRIER: maybe CARDS_SET */ + dprintf(("_collect_now: %p\n", obj)); assert(IMPLY(obj->stm_flags & GCFLAG_WRITE_BARRIER, obj->stm_flags & GCFLAG_CARDS_SET)); if (!(obj->stm_flags & GCFLAG_WRITE_BARRIER)) { /* do normal full trace, even if also card-marked */ obj->stm_flags |= GCFLAG_WRITE_BARRIER; - + dprintf(("-> has no cards\n")); /* Trace the 'obj' to replace pointers to nursery with pointers outside the nursery, possibly forcing nursery objects out and adding them to 'objects_pointing_to_nursery' as well. 
*/ @@ -210,6 +211,8 @@ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); } else { /* only trace cards */ + dprintf(("-> has cards\n")); + assert(!_is_in_nursery(obj)); char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -79,6 +79,7 @@ uint32_t _get_type_id(object_t *obj); void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); +uintptr_t _index_to_offset(object_t *obj, int n); void _set_weakref(object_t *obj, object_t *v); object_t* _get_weakref(object_t *obj); @@ -267,6 +268,16 @@ return *field; } +uintptr_t _index_to_offset(object_t *obj, int n) +{ + long nrefs = (long)((myobj_t*)obj)->type_id - 421420; + assert(n < nrefs); + + stm_char *field_addr = NULL; + field_addr += SIZEOF_MYOBJ; /* header */ + field_addr += n * sizeof(void*); /* field */ + return (uintptr_t)field_addr; +} ssize_t stmcb_size_rounded_up(struct object_s *obj) { @@ -395,22 +406,34 @@ lib._set_type_id(o, tid) return o -def stm_set_ref(obj, idx, ref): - stm_write(obj) +def stm_set_ref(obj, idx, ref, use_cards=False): + if use_cards: + stm_write_card(obj, lib._index_to_offset(obj, idx)) + else: + stm_write(obj) lib._set_ptr(obj, idx, ref) -def stm_get_ref(obj, idx): - stm_read(obj) +def stm_get_ref(obj, idx, use_cards=False): + if use_cards: + stm_read_card(obj, lib._index_to_offset(obj, idx)) + else: + stm_read(obj) return lib._get_ptr(obj, idx) -def stm_set_char(obj, c, offset=HDR): - stm_write(obj) +def stm_set_char(obj, c, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) + if use_cards: + stm_write_card(obj, offset) + else: + stm_write(obj) stm_get_real_address(obj)[offset] = c -def stm_get_char(obj, offset=HDR): - stm_read(obj) +def stm_get_char(obj, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) + if 
use_cards: + stm_read_card(obj, offset) + else: + stm_read(obj) return stm_get_real_address(obj)[offset] def stm_get_real_address(obj): From noreply at buildbot.pypy.org Tue May 20 13:11:55 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 13:11:55 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: change the interface from offset to index (and finally add the forgotten tests) Message-ID: <20140520111155.96F061C03C4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1218:36e751d8b104 Date: 2014-05-20 13:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/36e751d8b104/ Log: change the interface from offset to index (and finally add the forgotten tests) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,7 +40,29 @@ #endif } -static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t offset) +static void _stm_mark_card(object_t *obj, uintptr_t card_index) +{ + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { + /* not yet in the list */ + if (STM_PSEGMENT->old_objects_with_cards) { + /* if we never had a minor collection in this transaction, + this list doesn't exist */ + LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); + } + obj->stm_flags |= GCFLAG_CARDS_SET; + } + + /* Just acquire the corresponding lock for the next minor_collection + to know what may have changed. + We already own the object here or it is an overflow obj. */ + uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj) + card_index; + assert(write_locks[card_lock_idx] == 0 + || write_locks[card_lock_idx] == STM_PSEGMENT->write_lock_num); + if (!write_locks[card_lock_idx]) + write_locks[card_lock_idx] = STM_PSEGMENT->write_lock_num; +} + +static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t card_index) { /* is this an object from the same transaction, outside the nursery? 
*/ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) @@ -49,24 +71,15 @@ assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); - if (!offset) { - /* no card to be marked */ + if (!card_index) { + /* no card to be marked, don't call again until next collection */ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } else { /* don't remove GCFLAG_WRITE_BARRIER because we need to be here for every card to mark */ - if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - /* not yet in the list */ - LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); - obj->stm_flags |= GCFLAG_CARDS_SET; - } - - /* just acquire the corresponding lock for the next minor_collection - to know what may have changed. only we know about this object: */ - uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj + offset); - assert(!write_locks[lock_idx]); - write_locks[lock_idx] = STM_PSEGMENT->write_lock_num; + assert(STM_PSEGMENT->old_objects_with_cards); + _stm_mark_card(obj, card_index); } /* We don't need to do anything in the STM part of the WB slowpath: */ @@ -77,14 +90,18 @@ return false; } -void _stm_write_slowpath(object_t *obj, uintptr_t offset) +void _stm_write_slowpath(object_t *obj, uintptr_t card_index) { - assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), offset == 0)); + assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), card_index == 0)); + assert( + IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( + (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, + obj)))); assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - if (_stm_write_slowpath_overflow_objs(obj, offset)) + if (_stm_write_slowpath_overflow_objs(obj, card_index)) return; /* do a read-barrier now. 
Note that this must occur before the @@ -98,9 +115,7 @@ 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. */ - bool lock_whole = offset == 0; uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); - //uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj + offset); uint8_t lock_num = STM_PSEGMENT->write_lock_num; assert(base_lock_idx < sizeof(write_locks)); retry: @@ -175,43 +190,36 @@ goto retry; } + + /* check that we really have a private page */ + assert(is_private_page(STM_SEGMENT->segment_num, + ((uintptr_t)obj) / 4096)); + + /* check that so far all copies of the object have the flag */ + check_flag_write_barrier(obj); + /* A common case for write_locks[] that was either 0 or lock_num: we need to add the object to the appropriate list if there is one. */ - if (lock_whole) { + if (!card_index) { if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } - /* check that we really have a private page */ - assert(is_private_page(STM_SEGMENT->segment_num, - ((uintptr_t)obj) / 4096)); - /* check that so far all copies of the object have the flag */ - check_flag_write_barrier(obj); - /* remove GCFLAG_WRITE_BARRIER if we succeeded in getting the base write-lock (not for card marking). 
*/ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - /* for sanity, check again that all other segment copies of this - object still have the flag (so privatization worked) */ - check_flag_write_barrier(obj); + } else { + /* don't remove WRITE_BARRIER */ + _stm_mark_card(obj, card_index); + } - } else { /* card marking case */ + /* for sanity, check again that all other segment copies of this + object still have the flag (so privatization worked) */ + check_flag_write_barrier(obj); - if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - /* not yet in the list (may enter here multiple times) */ - if (STM_PSEGMENT->old_objects_with_cards != NULL) { - LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); - } - obj->stm_flags |= GCFLAG_CARDS_SET; - } - - /* check that we really have a private page */ - assert(is_private_page(STM_SEGMENT->segment_num, - ((uintptr_t)obj + offset) / 4096)); - } } static void reset_transaction_read_version(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -226,6 +226,10 @@ #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) +static inline uintptr_t get_card_index(uintptr_t byte_offset) { + assert(_STM_CARD_SIZE == 32); + return (byte_offset >> 5) + 1; +} static inline uintptr_t get_write_lock_idx(uintptr_t obj) { return (obj >> 4) - WRITELOCK_START; } @@ -262,15 +266,6 @@ return rm == other_transaction_read_version; } -static inline bool was_read_remote_card(char *base, object_t *obj, uintptr_t offset, - uint8_t other_transaction_read_version) -{ - uint8_t rm = ((struct stm_read_marker_s *) - (base + (((uintptr_t)obj + offset) >> 4)))->rm; - assert(rm <= other_transaction_read_version); - return rm == other_transaction_read_version; -} - static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into a %gs-prefixed address and that may otherwise be replaced with diff --git a/c7/stm/misc.c b/c7/stm/misc.c 
--- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -40,12 +40,6 @@ return (obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) == 0; } -bool _stm_was_read_card(object_t *obj, uintptr_t offset) -{ - return was_read_remote_card( - STM_SEGMENT->segment_base, obj, offset, - STM_SEGMENT->transaction_read_version); -} bool _stm_was_written_card(object_t *obj) { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,16 +183,32 @@ minor_trace_if_young(&tl->thread_local_obj); } +static __thread object_t *_card_base_obj; static void minor_trace_if_young_cards(object_t **pobj) { - /* XXX: maybe add a specialised stmcb_trace_cards() */ - uintptr_t obj = (uintptr_t)((char*)pobj - STM_SEGMENT->segment_base); - if (write_locks[get_write_lock_idx(obj)]) { + /* XXX: add a specialised stmcb_trace_cards() that + also gives the obj-base */ + assert(_card_base_obj); + uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)_card_base_obj); + uintptr_t card_lock_idx = base_lock_idx; + card_lock_idx += get_card_index( + (uintptr_t)((char*)pobj - STM_SEGMENT->segment_base) - (uintptr_t)_card_base_obj); + + if (write_locks[card_lock_idx]) { dprintf(("minor_trace_if_young_cards: trace %p\n", *pobj)); minor_trace_if_young(pobj); } } +static void _trace_card_object(object_t *obj) +{ + /* XXX HACK XXX: */ + _card_base_obj = obj; + assert(!_is_in_nursery(obj)); + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); +} + static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); @@ -212,9 +228,7 @@ } else { /* only trace cards */ dprintf(("-> has cards\n")); - assert(!_is_in_nursery(obj)); - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); + _trace_card_object(obj); } /* clear the CARDS_SET, but not the real cards since they are diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- 
a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -84,7 +84,7 @@ /* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); assert(CARD_SIZE > 0 && CARD_SIZE % 16 == 0); - assert(CARD_SIZE == 16); /* actually, it is hardcoded in some places right now.. */ + assert(CARD_SIZE == 32); /* actually, it is hardcoded in some places right now.. */ assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -120,7 +120,6 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -bool _stm_was_read_card(object_t *obj, uintptr_t offset); bool _stm_was_written_card(object_t *obj); uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); @@ -147,7 +146,7 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_GCFLAG_HAS_CARDS 0x08 #define _STM_GCFLAG_CARDS_SET 0x10 -#define _STM_CARD_SIZE 16 /* modulo 16 == 0! */ +#define _STM_CARD_SIZE 32 /* 16 may be safe too */ #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) @@ -218,21 +217,18 @@ _stm_write_slowpath(obj, 0); } -/* The following are barriers that work on the granularity of CARD_SIZE. - They can only be used on objects one called stm_use_cards() on. */ +/* The following is a GC-optimized barrier that works on the granularity + of CARD_SIZE. It can only be used on objects one called stm_use_cards() + on. It has the same purpose as stm_write() for TM. 
+ 'index' is the byte-offset into the object divided by _STM_CARD_SIZE + plus 1: (offset // CARD_SIZE) + 1 +*/ __attribute__((always_inline)) -static inline void stm_read_card(object_t *obj, uintptr_t offset) -{ - OPT_ASSERT(obj->stm_flags & _STM_GCFLAG_HAS_CARDS); - ((stm_read_marker_t *)(((uintptr_t)obj + offset) >> 4))->rm = - STM_SEGMENT->transaction_read_version; -} -__attribute__((always_inline)) -static inline void stm_write_card(object_t *obj, uintptr_t offset) +static inline void stm_write_card(object_t *obj, uintptr_t index) { OPT_ASSERT(obj->stm_flags & _STM_GCFLAG_HAS_CARDS); if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) - _stm_write_slowpath(obj, offset); + _stm_write_slowpath(obj, index); } /* Must be provided by the user of this library. diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -44,7 +44,6 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up); void stm_use_cards(object_t* o); -void stm_read_card(object_t *obj, uintptr_t offset); /*void stm_write_card(); use _checked_stm_write_card() instead */ @@ -56,9 +55,8 @@ object_t *stm_setup_prebuilt_weakref(object_t *); bool _checked_stm_write(object_t *obj); -bool _checked_stm_write_card(object_t *obj, uintptr_t offset); +bool _checked_stm_write_card(object_t *obj, uintptr_t index); bool _stm_was_read(object_t *obj); -bool _stm_was_read_card(object_t *obj, uintptr_t offset); bool _stm_was_written(object_t *obj); bool _stm_was_written_card(object_t *obj); char *_stm_real_address(object_t *obj); @@ -79,7 +77,7 @@ uint32_t _get_type_id(object_t *obj); void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); -uintptr_t _index_to_offset(object_t *obj, int n); +uintptr_t _index_to_card_index(object_t *obj, int n); void _set_weakref(object_t *obj, object_t *v); object_t* _get_weakref(object_t *obj); @@ -192,8 +190,8 @@ CHECKED(stm_write(object)); } -bool _checked_stm_write_card(object_t *object, 
uintptr_t offset) { - CHECKED(stm_write_card(object, offset)); +bool _checked_stm_write_card(object_t *object, uintptr_t index) { + CHECKED(stm_write_card(object, index)); } bool _check_stop_safe_point(void) { @@ -268,7 +266,7 @@ return *field; } -uintptr_t _index_to_offset(object_t *obj, int n) +uintptr_t _index_to_card_index(object_t *obj, int n) { long nrefs = (long)((myobj_t*)obj)->type_id - 421420; assert(n < nrefs); @@ -276,7 +274,7 @@ stm_char *field_addr = NULL; field_addr += SIZEOF_MYOBJ; /* header */ field_addr += n * sizeof(void*); /* field */ - return (uintptr_t)field_addr; + return ((uintptr_t)field_addr / _STM_CARD_SIZE) + 1; } ssize_t stmcb_size_rounded_up(struct object_s *obj) @@ -358,6 +356,8 @@ class EmptyStack(Exception): pass +def byte_offset_to_card_index(offset): + return (offset // CARD_SIZE) + 1 def is_in_nursery(o): return lib.stm_can_move(o) @@ -408,32 +408,27 @@ def stm_set_ref(obj, idx, ref, use_cards=False): if use_cards: - stm_write_card(obj, lib._index_to_offset(obj, idx)) + stm_write_card(obj, lib._index_to_card_index(obj, idx)) else: stm_write(obj) lib._set_ptr(obj, idx, ref) -def stm_get_ref(obj, idx, use_cards=False): - if use_cards: - stm_read_card(obj, lib._index_to_offset(obj, idx)) - else: - stm_read(obj) +def stm_get_ref(obj, idx): + stm_read(obj) return lib._get_ptr(obj, idx) def stm_set_char(obj, c, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) if use_cards: - stm_write_card(obj, offset) + index = byte_offset_to_card_index(offset) + stm_write_card(obj, index) else: stm_write(obj) stm_get_real_address(obj)[offset] = c -def stm_get_char(obj, offset=HDR, use_cards=False): +def stm_get_char(obj, offset=HDR): assert HDR <= offset < stm_get_obj_size(obj) - if use_cards: - stm_read_card(obj, offset) - else: - stm_read(obj) + stm_read(obj) return stm_get_real_address(obj)[offset] def stm_get_real_address(obj): @@ -442,19 +437,14 @@ def stm_read(o): lib.stm_read(o) -def stm_read_card(o, offset): - assert 
stm_get_flags(o) & GCFLAG_HAS_CARDS - assert offset < stm_get_obj_size(o) - lib.stm_read_card(o, offset) def stm_write(o): if lib._checked_stm_write(o): raise Conflict() -def stm_write_card(o, offset): +def stm_write_card(o, index): assert stm_get_flags(o) & GCFLAG_HAS_CARDS - assert offset < stm_get_obj_size(o) - if lib._checked_stm_write_card(o, offset): + if lib._checked_stm_write_card(o, index): raise Conflict() def stm_was_read(o): @@ -463,9 +453,6 @@ def stm_was_written(o): return lib._stm_was_written(o) -def stm_was_read_card(o, offset): - return lib._stm_was_read_card(o, offset) - def stm_was_written_card(o): return lib._stm_was_written_card(o) diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py new file mode 100644 --- /dev/null +++ b/c7/test/test_card_marking.py @@ -0,0 +1,56 @@ +from support import * +import py + +class TestBasic(BaseTest): + + def test_simple(self): + o = stm_allocate_old(1024, True) + self.start_transaction() + stm_read(o) + stm_write(o) + self.commit_transaction() + + + def test_simple2(self): + o = stm_allocate_old(1024, True) + self.start_transaction() + stm_write_card(o, 5) + assert not stm_was_written(o) # don't remove GCFLAG_WRITE_BARRIER + assert stm_was_written_card(o) + self.commit_transaction() + + def test_overflow(self): + self.start_transaction() + o = stm_allocate(1024, True) + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + stm_write_card(o, 5) + # don't remove GCFLAG_WB + assert not stm_was_written(o) + stm_write(o) + assert stm_was_written(o) + self.commit_transaction() + + def test_nursery(self): + o = stm_allocate_old_refs(200, True) + self.start_transaction() + p = stm_allocate(64, True) + d = stm_allocate(64, True) + stm_set_ref(o, 199, p, True) + + # without a write-barrier: + lib._set_ptr(o, 0, d) + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + pn = stm_get_ref(o, 199) + assert not is_in_nursery(pn) + assert pn != p + + # d was not traced! 
+ dn = stm_get_ref(o, 0) + assert is_in_nursery(dn) + assert dn == d From noreply at buildbot.pypy.org Tue May 20 14:29:49 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 14:29:49 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: fixes Message-ID: <20140520122949.B74E31D2DBB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1219:a290c9b39ef9 Date: 2014-05-20 14:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/a290c9b39ef9/ Log: fixes diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -42,6 +42,9 @@ static void _stm_mark_card(object_t *obj, uintptr_t card_index) { + assert(card_index > 0); + dprintf(("mark %p card %lu\n", obj, card_index)); + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { /* not yet in the list */ if (STM_PSEGMENT->old_objects_with_cards) { @@ -56,8 +59,11 @@ to know what may have changed. We already own the object here or it is an overflow obj. */ uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj) + card_index; + assert(write_locks[card_lock_idx] == 0 || write_locks[card_lock_idx] == STM_PSEGMENT->write_lock_num); + assert(get_write_lock_idx((uintptr_t)obj) != card_lock_idx); + if (!write_locks[card_lock_idx]) write_locks[card_lock_idx] = STM_PSEGMENT->write_lock_num; } @@ -175,7 +181,8 @@ } } else if (write_locks[base_lock_idx] == lock_num) { - OPT_ASSERT(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + assert(IMPLY(!(obj->stm_flags & GCFLAG_CARDS_SET), + STM_PSEGMENT->objects_pointing_to_nursery != NULL)); #ifdef STM_TESTS bool found = false; LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -67,6 +67,13 @@ return list_count(STM_PSEGMENT->objects_pointing_to_nursery); } +long _stm_count_old_objects_with_cards(void) +{ + if (STM_PSEGMENT->old_objects_with_cards == NULL) + return -1; + return list_count(STM_PSEGMENT->old_objects_with_cards); +} 
+ object_t *_stm_enum_modified_old_objects(long index) { return (object_t *)list_item( @@ -79,6 +86,12 @@ STM_PSEGMENT->objects_pointing_to_nursery, index); } +object_t *_stm_enum_old_objects_with_cards(long index) +{ + return (object_t *)list_item( + STM_PSEGMENT->old_objects_with_cards, index); +} + uint64_t _stm_total_allocated(void) { return increment_total_allocated(0); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,6 +183,23 @@ minor_trace_if_young(&tl->thread_local_obj); } +static void _reset_cards(object_t *obj) +{ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up((struct object_s *)realobj); + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_card_index(size - 1); + OPT_ASSERT(last_card_index >= card_index); + while (card_index <= last_card_index) { + write_locks[first_card_index + card_index] = 0; + card_index++; + } + + obj->stm_flags &= ~GCFLAG_CARDS_SET; +} + static __thread object_t *_card_base_obj; static void minor_trace_if_young_cards(object_t **pobj) { @@ -205,37 +222,51 @@ /* XXX HACK XXX: */ _card_base_obj = obj; assert(!_is_in_nursery(obj)); + assert(obj->stm_flags & GCFLAG_CARDS_SET); + + dprintf(("_trace_card_object(%p)\n", obj)); + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); + + _reset_cards(obj); } + + static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); dprintf(("_collect_now: %p\n", obj)); - assert(IMPLY(obj->stm_flags & GCFLAG_WRITE_BARRIER, - obj->stm_flags & GCFLAG_CARDS_SET)); + if (!(obj->stm_flags & GCFLAG_WRITE_BARRIER)) { - /* do normal full trace, even if also card-marked */ - obj->stm_flags |= GCFLAG_WRITE_BARRIER; - dprintf(("-> has no cards\n")); /* Trace the 'obj' to replace pointers to nursery with pointers outside the nursery, 
possibly forcing nursery objects out and adding them to 'objects_pointing_to_nursery' as well. */ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); - } else { - /* only trace cards */ - dprintf(("-> has cards\n")); + + obj->stm_flags |= GCFLAG_WRITE_BARRIER; + if (obj->stm_flags & GCFLAG_CARDS_SET) { + _reset_cards(obj); + } + } if (obj->stm_flags & GCFLAG_CARDS_SET) { _trace_card_object(obj); } +} - /* clear the CARDS_SET, but not the real cards since they are - still needed by STM conflict detection - XXX: maybe separate them since we now have to also trace all - these cards again in the next minor_collection */ - obj->stm_flags &= ~GCFLAG_CARDS_SET; + +static void collect_cardrefs_to_nursery(void) +{ + struct list_s *lst = STM_PSEGMENT->old_objects_with_cards; + + while (!list_is_empty(lst)) { + object_t *obj = (object_t*)list_pop_item(lst); + + assert(obj->stm_flags & GCFLAG_CARDS_SET); + _collect_now(obj); + } } static void collect_oldrefs_to_nursery(void) @@ -270,8 +301,9 @@ static void collect_modified_old_objects(void) { - LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t * /*item*/, - _collect_now(item)); + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, object_t * /*item*/, + _collect_now(item)); } static void collect_roots_from_markers(uintptr_t num_old) @@ -371,6 +403,7 @@ to hold the ones we didn't trace so far. 
*/ uintptr_t num_old; if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { + assert(STM_PSEGMENT->old_objects_with_cards == NULL); STM_PSEGMENT->objects_pointing_to_nursery = list_create(); STM_PSEGMENT->old_objects_with_cards = list_create(); @@ -390,7 +423,9 @@ collect_roots_in_nursery(); + collect_cardrefs_to_nursery(); collect_oldrefs_to_nursery(); + assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); /* now all surviving nursery objects have been moved out */ stm_move_young_weakrefs(); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -138,8 +138,10 @@ void _stm_set_nursery_free_count(uint64_t free_count); long _stm_count_modified_old_objects(void); long _stm_count_objects_pointing_to_nursery(void); +long _stm_count_old_objects_with_cards(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); +object_t *_stm_enum_old_objects_with_cards(long index); uint64_t _stm_total_allocated(void); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -101,8 +101,11 @@ long _stm_count_modified_old_objects(void); long _stm_count_objects_pointing_to_nursery(void); +long _stm_count_old_objects_with_cards(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); +object_t *_stm_enum_old_objects_with_cards(long index); + void stm_collect(long level); uint64_t _stm_total_allocated(void); @@ -496,6 +499,14 @@ return None return map(lib._stm_enum_objects_pointing_to_nursery, range(count)) +def old_objects_with_cards(): + count = lib._stm_count_old_objects_with_cards() + if count < 0: + return None + return map(lib._stm_enum_old_objects_with_cards, range(count)) + + + SHADOWSTACK_LENGTH = 1000 _keepalive = weakref.WeakKeyDictionary() diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ 
b/c7/test/test_card_marking.py @@ -54,3 +54,66 @@ dn = stm_get_ref(o, 0) assert is_in_nursery(dn) assert dn == d + + assert not stm_was_written(o) + stm_write_card(o, 2) + assert stm_was_written_card(o) + + # card cleared after last collection, + # so no retrace of index 199: + d2 = stm_allocate(64, True) + # without a write-barrier: + lib._set_ptr(o, 199, d2) + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + # d2 was not traced! + dn = stm_get_ref(o, 199) + assert is_in_nursery(dn) + assert dn == d2 + + def test_nursery2(self): + o = stm_allocate_old_refs(200, True) + self.start_transaction() + p = stm_allocate(64) + d = stm_allocate(64) + e = stm_allocate(64) + stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1, d, False) + lib._set_ptr(o, 100, e) + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + # stm_write in stm_set_ref made it trace everything + assert not is_in_nursery(stm_get_ref(o, 199)) + assert not is_in_nursery(stm_get_ref(o, 1)) + assert not is_in_nursery(stm_get_ref(o, 100)) + + def test_nursery3(self): + o = stm_allocate_old_refs(200, True) + self.start_transaction() + stm_minor_collect() + + p = stm_allocate(64) + d = stm_allocate(64) + e = stm_allocate(64) + stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1, d, True) + lib._set_ptr(o, 100, e) # no card marked! 
+ + assert not stm_was_written(o) + assert stm_was_written_card(o) + + print modified_old_objects() + print objects_pointing_to_nursery() + print old_objects_with_cards() + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + assert not is_in_nursery(stm_get_ref(o, 199)) + assert not is_in_nursery(stm_get_ref(o, 1)) + assert stm_get_ref(o, 100) == e # not traced From noreply at buildbot.pypy.org Tue May 20 15:23:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 15:23:21 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Try again to improve the performance of shadowstack, this time with a global register Message-ID: <20140520132321.793DB1C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71599:f79e1101b4ce Date: 2014-05-20 14:29 +0200 http://bitbucket.org/pypy/pypy/changeset/f79e1101b4ce/ Log: Try again to improve the performance of shadowstack, this time with a global register From noreply at buildbot.pypy.org Tue May 20 15:23:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 15:23:22 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Starting Message-ID: <20140520132322.A8B551C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71600:77dafb290569 Date: 2014-05-20 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/77dafb290569/ Log: Starting diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,4 +1,6 @@ +from rpython.flowspace.model import Block, Link, SpaceOperation from rpython.annotator import model as annmodel +from rpython.translator.unsimplify import varoftype, copyvar from rpython.rtyper.llannotation import SomePtr from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant @@ -13,46 +15,38 @@ class 
ShadowStackFrameworkGCTransformer(BaseFrameworkGCTransformer): - def annotate_walker_functions(self, getfn): - self.incr_stack_ptr = getfn(self.root_walker.incr_stack, - [annmodel.SomeInteger()], - SomeAddress(), - inline = True) - self.decr_stack_ptr = getfn(self.root_walker.decr_stack, - [annmodel.SomeInteger()], - SomeAddress(), - inline = True) - def build_root_walker(self): return ShadowStackRootWalker(self) + def transform_graph(self, graph): + self._transforming_graph = graph + super(ShadowStackFrameworkGCTransformer, self).transform_graph(graph) + del self._transforming_graph + + def ensure_ss_graph_marker(self): + graph = self._transforming_graph + ops = graph.startblock.operations + if not ops or ops[0].opname != 'gc_ss_graph_marker': + inputargs = [copyvar(self.translator.annotator, v) + for v in graph.startblock.inputargs] + block = Block(inputargs) + v_void = varoftype(lltype.Void) + block.operations.append(SpaceOperation('gc_ss_graph_marker', + [], v_void)) + block.closeblock(Link(inputargs, graph.startblock)) + graph.startblock = block + def push_roots(self, hop, keep_current_args=False): livevars = self.get_livevars_for_roots(hop, keep_current_args) self.num_pushs += len(livevars) - if not livevars: - return [] - c_len = rmodel.inputconst(lltype.Signed, len(livevars) ) - base_addr = hop.genop("direct_call", [self.incr_stack_ptr, c_len ], - resulttype=llmemory.Address) - for k,var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr) - v_adr = gen_cast(hop.llops, llmemory.Address, var) - hop.genop("raw_store", [base_addr, c_k, v_adr]) + self.ensure_ss_graph_marker() + hop.genop("gc_ss_store", livevars) return livevars def pop_roots(self, hop, livevars): - if not livevars: - return - c_len = rmodel.inputconst(lltype.Signed, len(livevars) ) - base_addr = hop.genop("direct_call", [self.decr_stack_ptr, c_len ], - resulttype=llmemory.Address) - if self.gcdata.gc.moving_gc: - # for moving collectors, reload the roots into the 
local variables - for k,var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k * sizeofaddr) - v_newaddr = hop.genop("raw_load", [base_addr, c_k], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) + # for moving collectors, reload the roots into the local variables + if self.gcdata.gc.moving_gc and livevars: + hop.genop("gc_ss_reload", livevars) class ShadowStackRootWalker(BaseRootWalker): @@ -61,18 +55,6 @@ # NB. 'self' is frozen, but we can use self.gcdata to store state gcdata = self.gcdata - def incr_stack(n): - top = gcdata.root_stack_top - gcdata.root_stack_top = top + n*sizeofaddr - return top - self.incr_stack = incr_stack - - def decr_stack(n): - top = gcdata.root_stack_top - n*sizeofaddr - gcdata.root_stack_top = top - return top - self.decr_stack = decr_stack - root_iterator = get_root_iterator(gctransformer) def walk_stack_root(callback, start, end): root_iterator.setcontext(NonConstant(llmemory.NULL)) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -465,7 +465,9 @@ 'gc_restore_exception': LLOp(), 'gc_call_rtti_destructor': LLOp(), 'gc_deallocate': LLOp(), - 'gc_reload_possibly_moved': LLOp(), + 'gc_ss_graph_marker': LLOp(), + 'gc_ss_store': LLOp(), + 'gc_ss_reload': LLOp(), # see rlib/objectmodel for gc_identityhash and gc_id 'gc_identityhash': LLOp(sideeffects=False, canmallocgc=True), 'gc_id': LLOp(sideeffects=False, canmallocgc=True), diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -437,6 +437,29 @@ from rpython.memory.gctransform import shadowstack return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) + def OP_GC_SS_GRAPH_MARKER(self, funcgen, op): + return '; struct rpy_shadowstack_s *rpy_ss = rpy_shadowstack;' + + def 
OP_GC_SS_STORE(self, funcgen, op): + lines = [] + for i, v in enumerate(op.args): + lines.append('rpy_ss[%d].s = %s;' % (i, funcgen.expr(v))) + lines.append('rpy_shadowstack = rpy_ss + %d;' % len(op.args)) + return '\n'.join(lines) + + def OP_GC_SS_RELOAD(self, funcgen, op): + lines = [] + for i, v in enumerate(op.args): + typename = funcgen.db.gettype(v.concretetype) + lines.append('%s = (%s)rpy_ss[%d].s;' % ( + funcgen.expr(v), + cdecl(typename, ''), + i)) + if isinstance(v, Constant): + lines[-1] = '/* %s */' % lines[-1] + lines.reverse() + return '\n'.join(lines) + class AsmGcRootFrameworkGcPolicy(BasicFrameworkGcPolicy): def gettransformer(self): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -35,7 +35,6 @@ #ifdef PYPY_USE_ASMGCC pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif - pypy_asm_stack_bottom(); instrument_setup(); #ifndef MS_WINDOWS @@ -50,6 +49,7 @@ errmsg = RPython_StartupCode(); if (errmsg) goto error; + pypy_asm_stack_bottom(); list = _RPyListOfString_New(argc); if (RPyExceptionOccurred()) goto memory_out; for (i=0; i Author: Armin Rigo Branch: shadowstack-again Changeset: r71601:fa7c09e2791f Date: 2014-05-20 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/fa7c09e2791f/ Log: Small fixes until some tests of test_newgc pass again diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -8,6 +8,7 @@ from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.llannotation import SomeAddress from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, 
sizeofaddr) @@ -85,6 +86,7 @@ BaseRootWalker.setup_root_walker(self) def walk_stack_roots(self, collect_stack_root): + llop.gc_stack_top(lltype.Void) gcdata = self.gcdata self.rootstackhook(collect_stack_root, gcdata.root_stack_base, gcdata.root_stack_top) @@ -317,6 +319,7 @@ self.gcdata.root_stack_base = self.unused_full_stack self.gcdata.root_stack_top = self.unused_full_stack self.unused_full_stack = llmemory.NULL + llop.gc_stack_bottom(lltype.Void) def _cleanup(self, shadowstackref): shadowstackref.base = llmemory.NULL diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -507,6 +507,7 @@ # see translator/c/src/mem.h for the valid indices 'gc_asmgcroot_static': LLOp(sideeffects=False), 'gc_stack_bottom': LLOp(canrun=True), + 'gc_stack_top': LLOp(canrun=True), # for stacklet+asmgcroot support 'gc_detach_callback_pieces': LLOp(), diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -431,6 +431,12 @@ raise AssertionError(subopnum) return ' '.join(parts) + def OP_GC_STACK_BOTTOM(self, funcgen, op): + return 'pypy_asm_stack_bottom();' + + def OP_GC_STACK_TOP(self, funcgen, op): + return 'pypy_asm_stack_top();' + class ShadowStackFrameworkGcPolicy(BasicFrameworkGcPolicy): def gettransformer(self): @@ -469,9 +475,6 @@ def GC_KEEPALIVE(self, funcgen, v): return 'pypy_asm_keepalive(%s);' % funcgen.expr(v) - def OP_GC_STACK_BOTTOM(self, funcgen, op): - return 'pypy_asm_stack_bottom();' - name_to_gcpolicy = { 'boehm': BoehmGcPolicy, diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -36,6 +36,7 @@ pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif instrument_setup(); + 
pypy_asm_stack_bottom(); #ifndef MS_WINDOWS /* this message does no longer apply to win64 :-) */ @@ -49,7 +50,6 @@ errmsg = RPython_StartupCode(); if (errmsg) goto error; - pypy_asm_stack_bottom(); list = _RPyListOfString_New(argc); if (RPyExceptionOccurred()) goto memory_out; for (i=0; i Author: Remi Meier Branch: card-marking Changeset: r1220:e1ac75ed3871 Date: 2014-05-20 15:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/e1ac75ed3871/ Log: clean cards on abort diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -359,6 +359,12 @@ } tree_clear(pseg->nursery_objects_shadows); + + if (STM_PSEGMENT->old_objects_with_cards) { + LIST_FOREACH_R(STM_PSEGMENT->old_objects_with_cards, object_t * /*item*/, + _reset_cards(item)); + } + return nursery_used; } diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -106,10 +106,6 @@ assert not stm_was_written(o) assert stm_was_written_card(o) - print modified_old_objects() - print objects_pointing_to_nursery() - print old_objects_with_cards() - self.push_root(o) stm_minor_collect() o = self.pop_root() @@ -117,3 +113,34 @@ assert not is_in_nursery(stm_get_ref(o, 199)) assert not is_in_nursery(stm_get_ref(o, 1)) assert stm_get_ref(o, 100) == e # not traced + + def test_abort_cleanup(self): + o = stm_allocate_old_refs(200, True) + self.start_transaction() + stm_minor_collect() + + p = stm_allocate_refs(64) + d = stm_allocate(64) + e = stm_allocate(64) + stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1, d, True) + stm_set_ref(p, 1, e) + + self.abort_transaction() + + assert not modified_old_objects() + assert not objects_pointing_to_nursery() + assert not old_objects_with_cards() + + self.start_transaction() + d = stm_allocate(64) + e = stm_allocate(64) + lib._set_ptr(o, 199, d) # no barrier + stm_set_ref(o, 1, e, True) # card barrier + + self.push_root(o) + stm_minor_collect() + o = 
self.pop_root() + + assert not is_in_nursery(stm_get_ref(o, 1)) + assert is_in_nursery(stm_get_ref(o, 199)) # not traced From noreply at buildbot.pypy.org Tue May 20 17:05:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 17:05:26 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: fix for last commit Message-ID: <20140520150526.B47AE1C3382@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1221:bdd0e55e018f Date: 2014-05-20 15:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/bdd0e55e018f/ Log: fix for last commit diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -326,6 +326,10 @@ static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT /* reset the nursery by zeroing it */ size_t nursery_used; char *realnursery; @@ -360,12 +364,14 @@ tree_clear(pseg->nursery_objects_shadows); - if (STM_PSEGMENT->old_objects_with_cards) { - LIST_FOREACH_R(STM_PSEGMENT->old_objects_with_cards, object_t * /*item*/, + if (pseg->old_objects_with_cards) { + LIST_FOREACH_R(pseg->old_objects_with_cards, object_t * /*item*/, _reset_cards(item)); } return nursery_used; +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } #define MINOR_NOTHING_TO_DO(pseg) \ From noreply at buildbot.pypy.org Tue May 20 17:05:27 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 17:05:27 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: test & fix for major gc Message-ID: <20140520150527.BE14E1C3382@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1222:b7879ec94205 Date: 2014-05-20 17:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/b7879ec94205/ Log: test & fix for major gc diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -231,7 +231,9 @@ return (byte_offset >> 5) 
+ 1; } static inline uintptr_t get_write_lock_idx(uintptr_t obj) { - return (obj >> 4) - WRITELOCK_START; + uintptr_t res = (obj >> 4) - WRITELOCK_START; + assert(res < sizeof(write_locks)); + return res; } static inline char *get_segment_base(long segment_num) { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -166,7 +166,7 @@ static inline uintptr_t mark_loc(object_t *obj) { - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj); assert(lock_idx < sizeof(write_locks)); return lock_idx; } @@ -450,7 +450,7 @@ written to but don't actually point to the nursery. Clear it up and set GCFLAG_WRITE_BARRIER again on the objects. This is the case for transactions where - MINOR_NOTHING_TO_DO() == false + MINOR_NOTHING_TO_DO() == true but they still did write-barriers on objects */ lst = pseg->objects_pointing_to_nursery; @@ -463,6 +463,16 @@ realobj->stm_flags |= GCFLAG_WRITE_BARRIER; })); list_clear(lst); + + lst = pseg->old_objects_with_cards; + LIST_FOREACH_R(lst, object_t* /*item*/, + ({ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + OPT_ASSERT(realobj->stm_flags & GCFLAG_CARDS_SET); + _reset_object_cards(&pseg->pub, item); + })); + list_clear(lst); } /* Remove from 'large_overflow_objects' all objects that die */ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,10 +183,14 @@ minor_trace_if_young(&tl->thread_local_obj); } -static void _reset_cards(object_t *obj) +static void _reset_object_cards(struct stm_segment_info_s *seg, object_t *obj) { - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size_t size = stmcb_size_rounded_up((struct object_s *)realobj); +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(seg->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; @@ -197,7 +201,10 @@ card_index++; } - obj->stm_flags &= ~GCFLAG_CARDS_SET; + realobj->stm_flags &= ~GCFLAG_CARDS_SET; + dprintf(("reset cards on %p\n", obj)); +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } static __thread object_t *_card_base_obj; @@ -229,7 +236,8 @@ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); - _reset_cards(obj); + _reset_object_cards( + get_segment(STM_SEGMENT->segment_num), obj); } @@ -249,7 +257,8 @@ obj->stm_flags |= GCFLAG_WRITE_BARRIER; if (obj->stm_flags & GCFLAG_CARDS_SET) { - _reset_cards(obj); + _reset_object_cards( + get_segment(STM_SEGMENT->segment_num), obj); } } if (obj->stm_flags & GCFLAG_CARDS_SET) { _trace_card_object(obj); @@ -366,7 +375,7 @@ if (pseg->old_objects_with_cards) { LIST_FOREACH_R(pseg->old_objects_with_cards, object_t * /*item*/, - _reset_cards(item)); + _reset_object_cards(&pseg->pub, item)); } return nursery_used; diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -6,6 +6,7 @@ static uint32_t highest_overflow_number; +static void _reset_object_cards(struct stm_segment_info_s *seg, object_t *obj); static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -1,8 +1,21 @@ from support import * import py + class TestBasic(BaseTest): + def _collect(self, kind): + if kind == 0: + stm_minor_collect() + elif kind == 1: + stm_major_collect() + elif kind == 2: + self.switch(1) + self.start_transaction() + stm_major_collect() + 
self.abort_transaction() + self.switch(0) + def test_simple(self): o = stm_allocate_old(1024, True) self.start_transaction() @@ -10,7 +23,6 @@ stm_write(o) self.commit_transaction() - def test_simple2(self): o = stm_allocate_old(1024, True) self.start_transaction() @@ -19,13 +31,20 @@ assert stm_was_written_card(o) self.commit_transaction() - def test_overflow(self): + @py.test.mark.parametrize("k", range(3)) + def test_overflow(self, k): self.start_transaction() o = stm_allocate(1024, True) + self.push_root(o) - stm_minor_collect() + self._collect(k) o = self.pop_root() + stm_write_card(o, 5) + + assert o in old_objects_with_cards() + assert o not in modified_old_objects() # overflow object + assert o not in objects_pointing_to_nursery() # don't remove GCFLAG_WB assert not stm_was_written(o) stm_write(o) @@ -80,7 +99,7 @@ e = stm_allocate(64) stm_set_ref(o, 199, p, True) stm_set_ref(o, 1, d, False) - lib._set_ptr(o, 100, e) + lib._set_ptr(o, 100, e) # no barrier self.push_root(o) stm_minor_collect() @@ -144,3 +163,26 @@ assert not is_in_nursery(stm_get_ref(o, 1)) assert is_in_nursery(stm_get_ref(o, 199)) # not traced + + @py.test.mark.parametrize("k", range(3)) + def test_major_gc(self, k): + o = stm_allocate_old_refs(200, True) + self.start_transaction() + p = stm_allocate(64) + stm_set_ref(o, 0, p, True) + + self.push_root(o) + stm_major_collect() + o = self.pop_root() + + stm_set_ref(o, 1, ffi.NULL, True) + p = stm_get_ref(o, 0) + assert stm_was_written_card(o) + + self.push_root(o) + self._collect(k) + o = self.pop_root() + + assert not stm_was_written_card(o) + assert stm_get_ref(o, 0) == p + self.commit_transaction() From noreply at buildbot.pypy.org Tue May 20 17:40:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 17:40:54 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: in-progress: use r15 also to return exceptions Message-ID: <20140520154054.787511C1106@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
shadowstack-again Changeset: r71602:35fc2a0ae50c Date: 2014-05-20 17:40 +0200 http://bitbucket.org/pypy/pypy/changeset/35fc2a0ae50c/ Log: in-progress: use r15 also to return exceptions diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,6 +1,7 @@ from rpython.flowspace.model import Block, Link, SpaceOperation from rpython.annotator import model as annmodel from rpython.translator.unsimplify import varoftype, copyvar +from rpython.translator.backendopt.ssa import SSA_to_SSI from rpython.rtyper.llannotation import SomePtr from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant @@ -16,38 +17,49 @@ class ShadowStackFrameworkGCTransformer(BaseFrameworkGCTransformer): + RPY_SHADOWSTACK_PTR = lltype.Ptr( + lltype.Struct('rpy_shadowstack_s', + hints={"external": "C", "c_name": "rpy_shadowstack_s"})) + def build_root_walker(self): return ShadowStackRootWalker(self) def transform_graph(self, graph): self._transforming_graph = graph + self._ss_graph_marker = None super(ShadowStackFrameworkGCTransformer, self).transform_graph(graph) + del self._ss_graph_marker del self._transforming_graph + def sanitize_graph(self, graph): + SSA_to_SSI(graph, self.translator.annotator) + def ensure_ss_graph_marker(self): - graph = self._transforming_graph - ops = graph.startblock.operations - if not ops or ops[0].opname != 'gc_ss_graph_marker': + if self._ss_graph_marker is None: + graph = self._transforming_graph inputargs = [copyvar(self.translator.annotator, v) for v in graph.startblock.inputargs] - block = Block(inputargs) - v_void = varoftype(lltype.Void) - block.operations.append(SpaceOperation('gc_ss_graph_marker', - [], v_void)) - block.closeblock(Link(inputargs, graph.startblock)) - graph.startblock = block + hblock = Block(inputargs) + v_marker = varoftype(self.RPY_SHADOWSTACK_PTR) + 
hblock.operations.append(SpaceOperation('gc_ss_graph_marker', + [], v_marker)) + hblock.closeblock(Link(inputargs, graph.startblock)) + graph.startblock = hblock + self._ss_graph_marker = v_marker + return self._ss_graph_marker def push_roots(self, hop, keep_current_args=False): livevars = self.get_livevars_for_roots(hop, keep_current_args) self.num_pushs += len(livevars) - self.ensure_ss_graph_marker() - hop.genop("gc_ss_store", livevars) + v_marker = self.ensure_ss_graph_marker() + hop.genop("gc_ss_store", [v_marker] + livevars) return livevars def pop_roots(self, hop, livevars): # for moving collectors, reload the roots into the local variables if self.gcdata.gc.moving_gc and livevars: - hop.genop("gc_ss_reload", livevars) + v_marker = self.ensure_ss_graph_marker() + hop.genop("gc_ss_reload", [v_marker] + livevars) class ShadowStackRootWalker(BaseRootWalker): diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -247,6 +247,7 @@ old_startblock = graph.startblock graph.startblock = graph.startblock.exits[0].target + self.sanitize_graph(graph) checkgraph(graph) self.links_to_split = None @@ -257,6 +258,9 @@ graph.exc_cleanup = (v, list(llops)) return is_borrowed # xxx for tests only + def sanitize_graph(self, graph): + pass + def annotate_helper(self, ll_helper, ll_args, ll_result, inline=False): assert not self.finished_helpers args_s = map(lltype_to_annotation, ll_args) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,6 +438,9 @@ 'gc_gettypeptr_group': LLOp(canfold=True), 'get_member_index': LLOp(canfold=True), + 'getfield_exc_type': LLOp(sideeffects=False), + 'setfield_exc_type': LLOp(), + # __________ used by the JIT ________ 'jit_marker': LLOp(), diff --git 
a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -444,22 +444,25 @@ return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) def OP_GC_SS_GRAPH_MARKER(self, funcgen, op): - return '; struct rpy_shadowstack_s *rpy_ss = rpy_shadowstack;' + return '%s = rpy_shadowstack;' % funcgen.expr(op.result) def OP_GC_SS_STORE(self, funcgen, op): + marker = funcgen.expr(op.args[0]) lines = [] - for i, v in enumerate(op.args): - lines.append('rpy_ss[%d].s = %s;' % (i, funcgen.expr(v))) - lines.append('rpy_shadowstack = rpy_ss + %d;' % len(op.args)) + for i, v in enumerate(op.args[1:]): + lines.append('%s[%d].s = %s;' % (marker, i, funcgen.expr(v))) + lines.append('rpy_shadowstack = %s + %d;' % (marker, len(op.args))) return '\n'.join(lines) def OP_GC_SS_RELOAD(self, funcgen, op): + marker = funcgen.expr(op.args[0]) lines = [] - for i, v in enumerate(op.args): + for i, v in enumerate(op.args[1:]): typename = funcgen.db.gettype(v.concretetype) - lines.append('%s = (%s)rpy_ss[%d].s;' % ( + lines.append('%s = (%s)%s[%d].s;' % ( funcgen.expr(v), cdecl(typename, ''), + marker, i)) if isinstance(v, Constant): lines[-1] = '/* %s */' % lines[-1] diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -36,7 +36,6 @@ pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif instrument_setup(); - pypy_asm_stack_bottom(); #ifndef MS_WINDOWS /* this message does no longer apply to win64 :-) */ @@ -50,14 +49,18 @@ errmsg = RPython_StartupCode(); if (errmsg) goto error; + pypy_asm_stack_bottom(); list = _RPyListOfString_New(argc); if (RPyExceptionOccurred()) goto memory_out; for (i=0; i Author: Armin Rigo Branch: shadowstack-again Changeset: r71603:a4e30bdbac19 Date: 2014-05-20 17:50 +0200 http://bitbucket.org/pypy/pypy/changeset/a4e30bdbac19/ Log: fix 
diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -451,7 +451,7 @@ lines = [] for i, v in enumerate(op.args[1:]): lines.append('%s[%d].s = %s;' % (marker, i, funcgen.expr(v))) - lines.append('rpy_shadowstack = %s + %d;' % (marker, len(op.args))) + lines.append('rpy_shadowstack = %s + %d;' % (marker, len(op.args) - 1)) return '\n'.join(lines) def OP_GC_SS_RELOAD(self, funcgen, op): From noreply at buildbot.pypy.org Tue May 20 17:59:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 17:59:00 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Revert partially 35fc2a0ae50c: found out that using r15 to return exceptions Message-ID: <20140520155900.B3E0E1C3339@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71604:ed8c9de9b293 Date: 2014-05-20 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/ed8c9de9b293/ Log: Revert partially 35fc2a0ae50c: found out that using r15 to return exceptions gives no measurable speed improvement diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,9 +438,6 @@ 'gc_gettypeptr_group': LLOp(canfold=True), 'get_member_index': LLOp(canfold=True), - 'getfield_exc_type': LLOp(sideeffects=False), - 'setfield_exc_type': LLOp(), - # __________ used by the JIT ________ 'jit_marker': LLOp(), diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -224,18 +224,5 @@ pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top = s; } -#define OP_GETFIELD_EXC_TYPE(r) \ - if (__builtin_expect(((Signed)rpy_shadowstack) & 1, 0)) { \ - r = (struct pypy_object_vtable0 *)(((char *)rpy_shadowstack) - 1); \ - if (!r) __builtin_unreachable(); \ - } \ - else { 
\ - r = NULL; \ - } -#define OP_SETFIELD_EXC_TYPE(x, r) \ - rpy_shadowstack = (x) ? \ - (struct rpy_shadowstack_s *)(((char *)x) + 1) \ - : NULL - #endif diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -67,19 +67,17 @@ self.c_n_i_error_ll_exc_type = constant_value(n_i_error_ll_exc_type) def rpyexc_occured(): - exc_type = lloperation.llop.getfield_exc_type( - self.lltype_of_exception_type) + exc_type = exc_data.exc_type return bool(exc_type) def rpyexc_fetch_type(): - return lloperation.llop.getfield_exc_type( - self.lltype_of_exception_type) + return exc_data.exc_type def rpyexc_fetch_value(): return exc_data.exc_value def rpyexc_clear(): - lloperation.llop.setfield_exc_type(lltype.Void, null_type) + exc_data.exc_type = null_type exc_data.exc_value = null_value def rpyexc_raise(etype, evalue): @@ -92,12 +90,12 @@ # us to see at least part of the traceback for them. 
ll_assert(etype != assertion_error_ll_exc_type, "AssertionError") ll_assert(etype != n_i_error_ll_exc_type, "NotImplementedError") - lloperation.llop.setfield_exc_type(lltype.Void, etype) + exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_start_traceback(lltype.Void, etype) def rpyexc_reraise(etype, evalue): - lloperation.llop.setfield_exc_type(lltype.Void, etype) + exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_reraise_traceback(lltype.Void, etype) @@ -108,8 +106,7 @@ def rpyexc_restore_exception(evalue): if evalue: - lloperation.llop.setfield_exc_type(lltype.Void, - ll_inst_type(evalue)) + exc_data.exc_type = ll_inst_type(evalue) exc_data.exc_value = evalue self.rpyexc_occured_ptr = self.build_func( @@ -146,15 +143,15 @@ lltype.Void, jitcallkind='rpyexc_raise') # for the JIT - #self.rpyexc_fetch_exception_ptr = self.build_func( - # "RPyFetchException", - # rpyexc_fetch_exception, - # [], self.lltype_of_exception_value) + self.rpyexc_fetch_exception_ptr = self.build_func( + "RPyFetchException", + rpyexc_fetch_exception, + [], self.lltype_of_exception_value) - #self.rpyexc_restore_exception_ptr = self.build_func( - # "RPyRestoreException", - # self.noinline(rpyexc_restore_exception), - # [self.lltype_of_exception_value], lltype.Void) + self.rpyexc_restore_exception_ptr = self.build_func( + "RPyRestoreException", + self.noinline(rpyexc_restore_exception), + [self.lltype_of_exception_value], lltype.Void) self.build_extra_funcs() @@ -464,6 +461,7 @@ def setup_excdata(self): EXCDATA = lltype.Struct('ExcData', + ('exc_type', self.lltype_of_exception_type), ('exc_value', self.lltype_of_exception_value)) self.EXCDATA = EXCDATA @@ -484,17 +482,11 @@ return Constant(fn_ptr, lltype.Ptr(FUNC_TYPE)) def gen_getfield(self, name, llops): - if name == 'exc_type': - return llops.genop('getfield_exc_type', [], - resulttype = self.lltype_of_exception_type) c_name = inputconst(lltype.Void, name) return llops.genop('getfield', 
[self.cexcdata, c_name], resulttype = getattr(self.EXCDATA, name)) def gen_setfield(self, name, v_value, llops): - if name == 'exc_type': - llops.genop('setfield_exc_type', [v_value]) - return c_name = inputconst(lltype.Void, name) llops.genop('setfield', [self.cexcdata, c_name, v_value]) @@ -523,7 +515,6 @@ exc_data = self.exc_data_ptr def rpyexc_get_exception_addr(): - raise NotImplementedError return (llmemory.cast_ptr_to_adr(exc_data) + llmemory.offsetof(EXCDATA, 'exc_type')) From noreply at buildbot.pypy.org Tue May 20 18:21:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 18:21:24 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Some extra instructions Message-ID: <20140520162124.BA5271C03C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71605:9180fdad3419 Date: 2014-05-13 21:02 +0200 http://bitbucket.org/pypy/pypy/changeset/9180fdad3419/ Log: Some extra instructions diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -522,7 +522,7 @@ # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations - 'bextr', + 'andn', 'bextr', 'blsi', 'blsmask', 'blsr', 'tzcnt', 'lzcnt', ]) # a partial list is hopefully good enough for now; it's all to support From noreply at buildbot.pypy.org Tue May 20 18:21:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 18:21:26 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: issue1769: trying to increase some limits in the SWEEPING phase. Now it Message-ID: <20140520162126.16B341C03C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71606:7002c3cc8d4c Date: 2014-05-16 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7002c3cc8d4c/ Log: issue1769: trying to increase some limits in the SWEEPING phase. 
Now it should be guaranteed that most steps during this phase should walk (and possibly free) at least '3 * nursery_size' bytes. More precisely, that's all steps but two of them, at the end of the two halves of this phase. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1861,20 +1861,26 @@ #END MARKING elif self.gc_state == STATE_SWEEPING: # - # Walk all rawmalloced objects and free the ones that don't - # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. - limit = self.nursery_size // self.ac.page_size - remaining = self.free_unvisited_rawmalloc_objects_step(limit) - # - # Ask the ArenaCollection to visit a fraction of the objects. - # Free the ones that have not been visited above, and reset - # GCFLAG_VISITED on the others. Visit at most '3 * limit' - # pages minus the number of objects already visited above. - done = self.ac.mass_free_incremental(self._free_if_unvisited, - 2 * limit + remaining) + if self.raw_malloc_might_sweep.non_empty(): + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. + # This limit is conservatively high enough to guarantee that + # a total object size of at least '3 * nursery_size' bytes + # is processed. + limit = 3 * self.nursery_size // self.small_request_threshold + self.free_unvisited_rawmalloc_objects_step(limit) + done = False # the 2nd half below must still be done + else: + # Ask the ArenaCollection to visit a fraction of the objects. + # Free the ones that have not been visited above, and reset + # GCFLAG_VISITED on the others. Visit at most '3 * + # nursery_size' bytes. 
+ limit = 3 * self.nursery_size // self.ac.page_size + done = self.ac.mass_free_incremental(self._free_if_unvisited, + limit) # XXX tweak the limits above # - if remaining > 0 and done: + if done: self.num_major_collects += 1 # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. From noreply at buildbot.pypy.org Tue May 20 18:21:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 18:21:27 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: issue 1762: accept null bytes in the .py file named in the command-line, Message-ID: <20140520162127.5D46B1C03C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71607:463b508891cc Date: 2014-05-18 14:50 +0200 http://bitbucket.org/pypy/pypy/changeset/463b508891cc/ Log: issue 1762: accept null bytes in the .py file named in the command- line, in addition to files that are imported. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -590,6 +590,11 @@ # handle the case where no command/filename/module is specified # on the command-line. + try: + from _ast import PyCF_ACCEPT_NULL_BYTES + except ImportError: + PyCF_ACCEPT_NULL_BYTES = 0 + # update sys.path *after* loading site.py, in case there is a # "site.py" file in the script's directory. Only run this if we're # executing the interactive prompt, if we're running a script we @@ -613,7 +618,8 @@ def run_it(): co_python_startup = compile(startup, python_startup, - 'exec') + 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_python_startup in mainmodule.__dict__ mainmodule.__file__ = python_startup run_toplevel(run_it) @@ -626,7 +632,8 @@ else: # If not interactive, just read and execute stdin normally. 
def run_it(): - co_stdin = compile(sys.stdin.read(), '', 'exec') + co_stdin = compile(sys.stdin.read(), '', 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_stdin in mainmodule.__dict__ mainmodule.__file__ = '' success = run_toplevel(run_it) diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -22,3 +22,4 @@ PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 PyCF_ONLY_AST = 0x0400 +PyCF_ACCEPT_NULL_BYTES = 0x10000000 # PyPy only, for compile() diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -24,7 +24,8 @@ """ ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | - consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): + consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 | + consts.PyCF_ACCEPT_NULL_BYTES): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) @@ -53,9 +54,10 @@ else: source = space.readbuf_w(w_source).as_str() - if '\x00' in source: - raise OperationError(space.w_TypeError, space.wrap( - "compile() expected string without null bytes")) + if not (flags & consts.PyCF_ACCEPT_NULL_BYTES): + if '\x00' in source: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: code = ec.compiler.compile_to_ast(source, filename, mode, flags) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -610,6 +610,16 @@ firstlineno = co.co_firstlineno assert firstlineno == 2 + def test_compile_null_bytes(self): + import _ast + raises(TypeError, compile, '\x00', 'mymod', 'exec', 0) + 
raises(SyntaxError, compile, '\x00', 'mymod', 'exec', + _ast.PyCF_ACCEPT_NULL_BYTES) + src = "#abc\x00def\n" + raises(TypeError, compile, src, 'mymod', 'exec') + raises(TypeError, compile, src, 'mymod', 'exec', 0) + compile(src, 'mymod', 'exec', _ast.PyCF_ACCEPT_NULL_BYTES) # works + def test_print_function(self): import __builtin__ import sys diff --git a/pypy/module/_ast/__init__.py b/pypy/module/_ast/__init__.py --- a/pypy/module/_ast/__init__.py +++ b/pypy/module/_ast/__init__.py @@ -6,6 +6,8 @@ interpleveldefs = { "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST, + "PyCF_ACCEPT_NULL_BYTES": + "space.wrap(%s)" % consts.PyCF_ACCEPT_NULL_BYTES, "__version__" : "space.wrap('82160')", # from CPython's svn. } appleveldefs = {} From noreply at buildbot.pypy.org Tue May 20 18:21:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 18:21:28 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: issue 1752: the peek() method must not create a string slice. This leads Message-ID: <20140520162128.8F4A31C03C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71608:d204591b2495 Date: 2014-05-18 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d204591b2495/ Log: issue 1752: the peek() method must not create a string slice. This leads to bogus complexity as soon as the buffer size is more than some small number. 
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -458,9 +458,7 @@ return result def peek(self): - pos = self.pos - assert pos >= 0 - return self.buffer[pos:] + return (self.pos, self.buffer) def try_to_find_file_descriptor(self): return self.stream.try_to_find_file_descriptor() diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -234,11 +234,12 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.peek() - pn = peeked.find("\n") + startindex, peeked = self.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) if pn < 0: pn = len(peeked) - c = self.read(pn + 1) + c = self.read(pn - startindex + 1) if not c: break result.append(c) @@ -265,7 +266,7 @@ pass def peek(self): - return '' + return (0, '') def try_to_find_file_descriptor(self): return -1 @@ -705,9 +706,7 @@ return "".join(chunks) def peek(self): - pos = self.pos - assert pos >= 0 - return self.buf[pos:] + return (self.pos, self.buf) write = PassThrough("write", flush_buffers=True) truncate = PassThrough("truncate", flush_buffers=True) @@ -970,12 +969,13 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.base.peek() - pn = peeked.find("\n") - pr = peeked.find("\r") + startindex, peeked = self.base.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) + pr = peeked.find("\r", startindex) if pn < 0: pn = len(peeked) if pr < 0: pr = len(peeked) - c = self.read(min(pn, pr) + 1) + c = self.read(min(pn, pr) - startindex + 1) if not c: break result.append(c) @@ -1028,7 +1028,7 @@ self.buf = "" def peek(self): - return self.buf + return (0, self.buf) write = PassThrough("write", 
flush_buffers=True) truncate = PassThrough("truncate", flush_buffers=True) From noreply at buildbot.pypy.org Tue May 20 18:21:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 May 2014 18:21:29 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Translation fix for 3777204fff8e Message-ID: <20140520162129.BF3641C03C4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.3.x Changeset: r71609:d2dda83f486d Date: 2014-05-19 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/d2dda83f486d/ Log: Translation fix for 3777204fff8e diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -209,11 +209,13 @@ while size > 0: # "peeks" on the underlying stream to see how many chars # we can safely read without reading past an end-of-line - peeked = stream.peek() - pn = peeked.find("\n", 0, size) + startindex, peeked = stream.peek() + assert 0 <= startindex <= len(peeked) + endindex = startindex + size + pn = peeked.find("\n", startindex, endindex) if pn < 0: - pn = min(size-1, len(peeked)) - c = stream.read(pn + 1) + pn = min(endindex - 1, len(peeked)) + c = stream.read(pn - startindex + 1) if not c: break result.append(c) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -554,7 +554,7 @@ else: difpos = offset if -self.pos <= difpos <= currentsize: - self.pos += difpos + self.pos += intmask(difpos) return if whence == 1: offset -= currentsize From noreply at buildbot.pypy.org Tue May 20 18:21:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:30 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: document changes from 2.3, reorganize historical release notes Message-ID: <20140520162130.E1F831C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71610:5de2cea8e468 Date: 2014-05-20 00:31 +0300 
http://bitbucket.org/pypy/pypy/changeset/5de2cea8e468/ Log: document changes from 2.3, reorganize historical release notes diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -1,19 +1,42 @@ Historical release notes ------------------------- +======================== + +Cpython 2.7 compatible versions +=============================== .. toctree:: + release-2.3.0.rst + release-2.2.1.rst + release-2.2.0.rst + release-2.1.0.rst + release-2.1.0-beta2.rst + release-2.1.0-beta1.rst + release-2.1.0.rst + release-2.0.2.rst + release-2.0.1.rst + release-2.0.0.rst + release-2.0.0-beta2.rst + release-2.0.0-beta1.rst + release-1.9.0.rst + release-1.8.0.rst + release-1.7.0.rst + release-1.6.0.rst + release-1.5.0.rst + release-1.4.1.rst + release-1.4.0beta.rst + release-1.4.0.rst + release-1.3.0.rst + release-1.2.0.rst + release-1.1.0.rst + release-1.0.0.rst + release-0.99.0.rst + release-0.9.0.rst + release-0.8.0.rst + release-0.7.0.rst release-0.6 - release-0.7.0.rst - release-0.8.0.rst - release-0.9.0.rst - release-0.99.0.rst - release-1.0.0.rst - release-1.1.0.rst - release-1.2.0.rst - release-1.3.0.rst - release-1.4.0.rst - release-1.4.0beta.rst - release-1.4.1.rst - release-1.5.0.rst - release-1.6.0.rst + +Cpython 3.2 compatible versions +=============================== +.. toctree:: + release-pypy3-2.1.0-beta1.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,6 +1,16 @@ ======================= -What's new in PyPy 2.3+ +What's new in PyPy 2.4+ ======================= .. this is a revision shortly after release-2.3.x .. 
startrev: b2cc67adbaad + +Added support for the stdlib gdbm module via cffi + +Fixes for issues #1769, #1764, #1762, #1752 + +Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` + +Support compilation with gcc-4.9 + +Annotator cleanups From noreply at buildbot.pypy.org Tue May 20 18:21:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:32 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: start to create 2.3.1 Message-ID: <20140520162132.13FC91C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71611:9ccfc38e7447 Date: 2014-05-20 00:48 +0300 http://bitbucket.org/pypy/pypy/changeset/9ccfc38e7447/ Log: start to create 2.3.1 diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.1.rst @@ -0,0 +1,11 @@ +======================= +What's new since PyPy 2.3? +======================= + +.. this is a revision shortly after release-2.3 +.. 
startrev: 394146e9bb67 + +Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` + +Support compilation with gcc-4.9 + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,8 +9,4 @@ Fixes for issues #1769, #1764, #1762, #1752 -Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` - -Support compilation with gcc-4.9 - Annotator cleanups From noreply at buildbot.pypy.org Tue May 20 18:21:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:33 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: update version numbers to 2.3.1 (doc/*.rst changes must be copied to default when we release) Message-ID: <20140520162133.300B41C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-2.3.x Changeset: r71612:7158396acaba Date: 2014-05-20 01:17 +0300 http://bitbucket.org/pypy/pypy/changeset/7158396acaba/ Log: update version numbers to 2.3.1 (doc/*.rst changes must be copied to default when we release) diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.3.0' +release = '2.3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,11 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. -* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.3.0`_: the latest official release +* `Release 2.3.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.6" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0" +#define PYPY_VERSION "2.3.1" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 3, 1, "final", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Tue May 20 18:21:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:34 +0200 (CEST) Subject: [pypy-commit] pypy default: document changes from 2.3, reorganize historical release notes Message-ID: <20140520162134.4DEB41C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71613:d6b02fb06861 Date: 2014-05-20 00:31 +0300 http://bitbucket.org/pypy/pypy/changeset/d6b02fb06861/ Log: document changes from 2.3, reorganize historical release notes diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -1,19 +1,42 @@ Historical release notes ------------------------- +======================== + +Cpython 2.7 compatible versions +=============================== .. 
toctree:: + release-2.3.0.rst + release-2.2.1.rst + release-2.2.0.rst + release-2.1.0.rst + release-2.1.0-beta2.rst + release-2.1.0-beta1.rst + release-2.1.0.rst + release-2.0.2.rst + release-2.0.1.rst + release-2.0.0.rst + release-2.0.0-beta2.rst + release-2.0.0-beta1.rst + release-1.9.0.rst + release-1.8.0.rst + release-1.7.0.rst + release-1.6.0.rst + release-1.5.0.rst + release-1.4.1.rst + release-1.4.0beta.rst + release-1.4.0.rst + release-1.3.0.rst + release-1.2.0.rst + release-1.1.0.rst + release-1.0.0.rst + release-0.99.0.rst + release-0.9.0.rst + release-0.8.0.rst + release-0.7.0.rst release-0.6 - release-0.7.0.rst - release-0.8.0.rst - release-0.9.0.rst - release-0.99.0.rst - release-1.0.0.rst - release-1.1.0.rst - release-1.2.0.rst - release-1.3.0.rst - release-1.4.0.rst - release-1.4.0beta.rst - release-1.4.1.rst - release-1.5.0.rst - release-1.6.0.rst + +Cpython 3.2 compatible versions +=============================== +.. toctree:: + release-pypy3-2.1.0-beta1.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,6 +1,16 @@ ======================= -What's new in PyPy 2.3+ +What's new in PyPy 2.4+ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: f556d32f8319 +.. 
startrev: b2cc67adbaad + +Added support for the stdlib gdbm module via cffi + +Fixes for issues #1769, #1764, #1762, #1752 + +Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` + +Support compilation with gcc-4.9 + +Annotator cleanups From noreply at buildbot.pypy.org Tue May 20 18:21:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:35 +0200 (CEST) Subject: [pypy-commit] pypy default: make test pass Message-ID: <20140520162135.7FE6F1C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71614:06b574e492e4 Date: 2014-05-20 00:39 +0300 http://bitbucket.org/pypy/pypy/changeset/06b574e492e4/ Log: make test pass diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -14,3 +14,6 @@ Support compilation with gcc-4.9 Annotator cleanups + +.. branch: release-2.3.x + From noreply at buildbot.pypy.org Tue May 20 18:21:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:36 +0200 (CEST) Subject: [pypy-commit] pypy default: start to create 2.3.1 Message-ID: <20140520162136.B1DE91C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71615:249366852219 Date: 2014-05-20 00:48 +0300 http://bitbucket.org/pypy/pypy/changeset/249366852219/ Log: start to create 2.3.1 diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.1.rst @@ -0,0 +1,11 @@ +======================= +What's new since PyPy 2.3? +======================= + +.. this is a revision shortly after release-2.3 +.. 
startrev: 394146e9bb67 + +Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` + +Support compilation with gcc-4.9 + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -9,10 +9,6 @@ Fixes for issues #1769, #1764, #1762, #1752 -Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` - -Support compilation with gcc-4.9 - Annotator cleanups .. branch: release-2.3.x From noreply at buildbot.pypy.org Tue May 20 18:21:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:37 +0200 (CEST) Subject: [pypy-commit] pypy default: document grafted changes Message-ID: <20140520162137.D16A71C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71616:fa5993689db1 Date: 2014-05-20 01:11 +0300 http://bitbucket.org/pypy/pypy/changeset/fa5993689db1/ Log: document grafted changes diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst --- a/pypy/doc/whatsnew-2.3.1.rst +++ b/pypy/doc/whatsnew-2.3.1.rst @@ -9,3 +9,5 @@ Support compilation with gcc-4.9 +Fixes for issues #1769, #1764, #1762, #1752 + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,8 +7,6 @@ Added support for the stdlib gdbm module via cffi -Fixes for issues #1769, #1764, #1762, #1752 - Annotator cleanups .. 
branch: release-2.3.x From noreply at buildbot.pypy.org Tue May 20 18:21:38 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 20 May 2014 18:21:38 +0200 (CEST) Subject: [pypy-commit] pypy default: not relevant Message-ID: <20140520162138.EC2201C03C4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71617:9e70dfd8c9de Date: 2014-05-20 01:19 +0300 http://bitbucket.org/pypy/pypy/changeset/9e70dfd8c9de/ Log: not relevant diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures From noreply at buildbot.pypy.org Tue May 20 18:39:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 18:39:56 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: fix clearing of cards in some cases (test_random) Message-ID: <20140520163956.2A2881C1106@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1223:ac2d8c48ced3 Date: 2014-05-20 18:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/ac2d8c48ced3/ Log: fix clearing of cards in some cases (test_random) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -49,7 +49,7 @@ /* not yet in the list */ if (STM_PSEGMENT->old_objects_with_cards) { /* if we never had a minor collection in this transaction, - this list doesn't exist */ + this list doesn't exist, we rely on modified_old_objs instead */ LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); } obj->stm_flags |= GCFLAG_CARDS_SET; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c 
@@ -459,7 +459,10 @@ ({ struct object_s *realobj = (struct object_s *) REAL_ADDRESS(pseg->pub.segment_base, item); + assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER)); + OPT_ASSERT(!(realobj->stm_flags & GCFLAG_CARDS_SET)); + realobj->stm_flags |= GCFLAG_WRITE_BARRIER; })); list_clear(lst); @@ -473,6 +476,16 @@ _reset_object_cards(&pseg->pub, item); })); list_clear(lst); + } else { + LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + _reset_object_cards(&pseg->pub, item); + } + }); } /* Remove from 'large_overflow_objects' all objects that die */ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -195,8 +195,12 @@ uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; uintptr_t last_card_index = get_card_index(size - 1); - OPT_ASSERT(last_card_index >= card_index); + while (card_index <= last_card_index) { + #ifndef NDEBUG + if (write_locks[first_card_index + card_index]) + dprintf(("cleared card %lu on %p\n", card_index, obj)); + #endif write_locks[first_card_index + card_index] = 0; card_index++; } @@ -236,8 +240,7 @@ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); - _reset_object_cards( - get_segment(STM_SEGMENT->segment_num), obj); + _reset_object_cards(get_segment(STM_SEGMENT->segment_num), obj); } @@ -257,8 +260,7 @@ obj->stm_flags |= GCFLAG_WRITE_BARRIER; if (obj->stm_flags & GCFLAG_CARDS_SET) { - _reset_object_cards( - get_segment(STM_SEGMENT->segment_num), obj); + _reset_object_cards(get_segment(STM_SEGMENT->segment_num), obj); } } if (obj->stm_flags & GCFLAG_CARDS_SET) { _trace_card_object(obj); @@ -275,6 +277,7 @@ assert(obj->stm_flags & GCFLAG_CARDS_SET); _collect_now(obj); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); } } 
@@ -287,6 +290,7 @@ object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); _collect_now(obj); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); if (obj_sync_now & FLAG_SYNC_LARGE) { /* this was a large object. We must either synchronize the @@ -376,6 +380,13 @@ if (pseg->old_objects_with_cards) { LIST_FOREACH_R(pseg->old_objects_with_cards, object_t * /*item*/, _reset_object_cards(&pseg->pub, item)); + } else { + LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, + { + if (item->stm_flags & GCFLAG_CARDS_SET) { + _reset_object_cards(&pseg->pub, item); + } + }); } return nursery_used; diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -378,7 +378,7 @@ num = str(global_state.rnd.randrange(1, 100)) r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) - ex.do('%s = stm_allocate_refs(%s)' % (r, num)) + ex.do('%s = stm_allocate_refs(%s, True)' % (r, num)) ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, "ffi.NULL", True) @@ -438,9 +438,9 @@ thread_state.abort_transaction() offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v)) + ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, "True")) if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v)) + ex.do(raising_call(False, "stm_set_ref", r, "0", v, "True")) else: ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) if not aborts: @@ -562,7 +562,7 @@ global_state.prebuilt_roots.append(r) r = global_state.get_new_root_name(True, "50") - ex.do('%s = stm_allocate_old_refs(50)' % r) + ex.do('%s = stm_allocate_old_refs(50, True)' % r) global_state.committed_transaction_state.add_root(r, "ffi.NULL", False) global_state.prebuilt_roots.append(r) global_state.committed_transaction_state.write_set = set() From noreply at buildbot.pypy.org Tue May 20 18:39:57 2014 From: 
noreply at buildbot.pypy.org (Raemi) Date: Tue, 20 May 2014 18:39:57 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: remove invalid assert and fix wrong use of segment-local reference Message-ID: <20140520163957.286E41C1106@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1224:f88edde120b2 Date: 2014-05-20 18:39 +0200 http://bitbucket.org/pypy/stmgc/changeset/f88edde120b2/ Log: remove invalid assert and fix wrong use of segment-local reference diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -181,8 +181,6 @@ } } else if (write_locks[base_lock_idx] == lock_num) { - assert(IMPLY(!(obj->stm_flags & GCFLAG_CARDS_SET), - STM_PSEGMENT->objects_pointing_to_nursery != NULL)); #ifdef STM_TESTS bool found = false; LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -383,7 +383,10 @@ } else { LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, { - if (item->stm_flags & GCFLAG_CARDS_SET) { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { _reset_object_cards(&pseg->pub, item); } }); From noreply at buildbot.pypy.org Tue May 20 18:57:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 May 2014 18:57:20 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix. Message-ID: <20140520165720.99DB91C3331@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71618:34a46c3da6ce Date: 2014-05-20 04:05 +0200 http://bitbucket.org/pypy/pypy/changeset/34a46c3da6ce/ Log: Fix. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -85,7 +85,8 @@ size = stop - start if size < 0: size = 0 - buf = SubBuffer(self.buf, start, size) + buf = SubBuffer(self.buf, start * self.buf.itemsize, + size * self.buf.itemsize) return W_MemoryView(buf) def descr_tobytes(self, space): From noreply at buildbot.pypy.org Tue May 20 18:57:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 20 May 2014 18:57:21 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Move contents from ronan.rst to translation.rst. Message-ID: <20140520165721.E54261C3331@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r71619:d7c76564bfd8 Date: 2014-05-20 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/d7c76564bfd8/ Log: Move contents from ronan.rst to translation.rst. diff --git a/rpython/doc/ronan.rst b/rpython/doc/ronan.rst deleted file mode 100644 --- a/rpython/doc/ronan.rst +++ /dev/null @@ -1,231 +0,0 @@ -.. - @Ronan: This is the old documentation for the flow object space and flow model. - Please integrate (and edit when needed) this into the new section in the document rpython/doc/translation.rst (section "Building Flow Graphs"). - - -.. _flow-object-space: - -Building Flow Graphs --------------------- - -Introduction -~~~~~~~~~~~~ - -The task of the flow graph builder (the source is at :source:`rpython/flowspace/`) -is to generate a control-flow graph from a function. This graph will also -contain a trace of the individual operations, so that it is actually just an -alternate representation for the function. - -The basic idea is that if an interpreter is given a function, e.g.:: - - def f(n): - return 3*n+2 - -it will compile it to bytecode and then execute it on its VM. 
-Instead, the flow graph builder contains an `abstract interpreter`_ which takes the bytecode -and performs whatever stack-shuffling and variable juggling is needed, but -merely records any actual operation performed on a Python object into -a structure called a basic block. The result of the operation is represented by a -placeholder value that can appear in further operations. - -.. _abstract interpreter: http://en.wikipedia.org/wiki/Abstract_interpretation - -For example, if the placeholder ``v1`` is given as the argument to the above -function, the bytecode interpreter will call ``v2 = space.mul(space.wrap(3), -v1)`` and then ``v3 = space.add(v2, space.wrap(2))`` and return ``v3`` as the -result. During these calls, the following block is recorded:: - - Block(v1): # input argument - v2 = mul(Constant(3), v1) - v3 = add(v2, Constant(2)) - - -Abstract interpretation -~~~~~~~~~~~~~~~~~~~~~~~ - -``build_flow()`` works by recording all operations issued by the bytecode -interpreter into basic blocks. A basic block ends in one of two cases: when -the bytecode interpreters calls ``is_true()``, or when a joinpoint is reached. - -* A joinpoint occurs when the next operation is about to be recorded into the - current block, but there is already another block that records an operation - for the same bytecode position. This means that the bytecode interpreter - has closed a loop and is interpreting already-seen code again. In this - situation, we interrupt the bytecode interpreter and we make a link from the - end of the current block back to the previous block, thus closing the loop - in the flow graph as well. (Note that this occurs only when an operation is - about to be recorded, which allows some amount of constant-folding.) - -* If the bytecode interpreter calls ``is_true()``, the abstract interpreter doesn't - generally know if the answer should be True or False, so it puts a - conditional jump and generates two successor blocks for the current basic - block. 
There is some trickery involved so that the bytecode interpreter is - fooled into thinking that ``is_true()`` first returns False (and the - subsequent operations are recorded in the first successor block), and later - the *same* call to ``is_true()`` also returns True (and the subsequent - operations go this time to the other successor block). - -(This section to be extended...) - - - -.. _flow-model: - -The Flow Model --------------- - -Here we describe the data structures produced by ``build_flow()``, which are -the basic data structures of the translation process. - -All these types are defined in :source:`rpython/flowspace/model.py` (which is a -rather important module in the PyPy source base, to reinforce the point). - -The flow graph of a function is represented by the class ``FunctionGraph``. -It contains a reference to a collection of ``Block``\ s connected by ``Link``\ s. - -A ``Block`` contains a list of ``SpaceOperation``\ s. Each ``SpaceOperation`` -has an ``opname`` and a list of ``args`` and ``result``, which are either -``Variable``\ s or ``Constant``\ s. - -We have an extremely useful PyGame viewer, which allows you to visually -inspect the graphs at various stages of the translation process (very -useful to try to work out why things are breaking). It looks like this: - - .. image:: _static/bpnn_update.png - -It is recommended to play with ``python bin/translatorshell.py`` on a few -examples to get an idea of the structure of flow graphs. The following describes -the types and their attributes in some detail: - - -``FunctionGraph`` - A container for one graph (corresponding to one function). - - :startblock: the first block. It is where the control goes when the - function is called. The input arguments of the startblock - are the function's arguments. If the function takes a - ``*args`` argument, the ``args`` tuple is given as the last - input argument of the startblock. - - :returnblock: the (unique) block that performs a function return. 
It is - empty, not actually containing any ``return`` operation; the - return is implicit. The returned value is the unique input - variable of the returnblock. - - :exceptblock: the (unique) block that raises an exception out of the - function. The two input variables are the exception class - and the exception value, respectively. (No other block will - actually link to the exceptblock if the function does not - explicitly raise exceptions.) - - -``Block`` - A basic block, containing a list of operations and ending in jumps to other - basic blocks. All the values that are "live" during the execution of the - block are stored in Variables. Each basic block uses its own distinct - Variables. - - :inputargs: list of fresh, distinct Variables that represent all the - values that can enter this block from any of the previous - blocks. - - :operations: list of SpaceOperations. - :exitswitch: see below - - :exits: list of Links representing possible jumps from the end of this - basic block to the beginning of other basic blocks. - - Each Block ends in one of the following ways: - - * unconditional jump: exitswitch is None, exits contains a single Link. - - * conditional jump: exitswitch is one of the Variables that appear in the - Block, and exits contains one or more Links (usually 2). Each Link's - exitcase gives a concrete value. This is the equivalent of a "switch": - the control follows the Link whose exitcase matches the run-time value of - the exitswitch Variable. It is a run-time error if the Variable doesn't - match any exitcase. - - * exception catching: exitswitch is ``Constant(last_exception)``. The first - Link has exitcase set to None and represents the non-exceptional path. - The next Links have exitcase set to a subclass of Exception, and are taken - when the *last* operation of the basic block raises a matching exception. - (Thus the basic block must not be empty, and only the last operation is - protected by the handler.) 
- - * return or except: the returnblock and the exceptblock have operations set - to an empty tuple, exitswitch to None, and exits empty. - - -``Link`` - A link from one basic block to another. - - :prevblock: the Block that this Link is an exit of. - - :target: the target Block to which this Link points to. - - :args: a list of Variables and Constants, of the same size as the - target Block's inputargs, which gives all the values passed - into the next block. (Note that each Variable used in the - prevblock may appear zero, one or more times in the ``args`` - list.) - - :exitcase: see above. - - :last_exception: None or a Variable; see below. - - :last_exc_value: None or a Variable; see below. - - Note that ``args`` uses Variables from the prevblock, which are matched to - the target block's ``inputargs`` by position, as in a tuple assignment or - function call would do. - - If the link is an exception-catching one, the ``last_exception`` and - ``last_exc_value`` are set to two fresh Variables that are considered to be - created when the link is entered; at run-time, they will hold the exception - class and value, respectively. These two new variables can only be used in - the same link's ``args`` list, to be passed to the next block (as usual, - they may actually not appear at all, or appear several times in ``args``). - - -``SpaceOperation`` - A recorded (or otherwise generated) basic operation. - - :opname: the name of the operation. ``build_flow()`` produces only operations - from the list in ``rpython.flowspace.operation``, but later the - names can be changed arbitrarily. - - :args: list of arguments. Each one is a Constant or a Variable seen - previously in the basic block. - - :result: a *new* Variable into which the result is to be stored. - - Note that operations usually cannot implicitly raise exceptions at run-time; - so for example, code generators can assume that a ``getitem`` operation on a - list is safe and can be performed without bound checking. 
The exceptions to - this rule are: (1) if the operation is the last in the block, which ends - with ``exitswitch == Constant(last_exception)``, then the implicit - exceptions must be checked for, generated, and caught appropriately; (2) - calls to other functions, as per ``simple_call`` or ``call_args``, can - always raise whatever the called function can raise --- and such exceptions - must be passed through to the parent unless they are caught as above. - - -``Variable`` - A placeholder for a run-time value. There is mostly debugging stuff here. - - :name: it is good style to use the Variable object itself instead of its - ``name`` attribute to reference a value, although the ``name`` is - guaranteed unique. - - -``Constant`` - A constant value used as argument to a SpaceOperation, or as value to pass - across a Link to initialize an input Variable in the target Block. - - :value: the concrete value represented by this Constant. - :key: a hashable object representing the value. - - A Constant can occasionally store a mutable Python object. It represents a - static, pre-initialized, read-only version of that object. The flow graph - should not attempt to actually mutate such Constants. diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -101,7 +101,227 @@ Building Flow Graphs -------------------- -.. @Ronan: here +Introduction +~~~~~~~~~~~~ + +The task of the flow graph builder (the source is at :source:`rpython/flowspace/`) +is to generate a control-flow graph from a function. This graph will also +contain a trace of the individual operations, so that it is actually just an +alternate representation for the function. + +The basic idea is that if an interpreter is given a function, e.g.:: + + def f(n): + return 3*n+2 + +it will compile it to bytecode and then execute it on its VM. 
+Instead, the flow graph builder contains an `abstract interpreter`_ which takes the bytecode +and performs whatever stack-shuffling and variable juggling is needed, but +merely records any actual operation performed on a Python object into +a structure called a basic block. The result of the operation is represented by a +placeholder value that can appear in further operations. + +.. _abstract interpreter: http://en.wikipedia.org/wiki/Abstract_interpretation + +For example, if the placeholder ``v1`` is given as the argument to the above +function, the bytecode interpreter will call ``v2 = space.mul(space.wrap(3), +v1)`` and then ``v3 = space.add(v2, space.wrap(2))`` and return ``v3`` as the +result. During these calls, the following block is recorded:: + + Block(v1): # input argument + v2 = mul(Constant(3), v1) + v3 = add(v2, Constant(2)) + + +Abstract interpretation +~~~~~~~~~~~~~~~~~~~~~~~ + +``build_flow()`` works by recording all operations issued by the bytecode +interpreter into basic blocks. A basic block ends in one of two cases: when +the bytecode interpreters calls ``is_true()``, or when a joinpoint is reached. + +* A joinpoint occurs when the next operation is about to be recorded into the + current block, but there is already another block that records an operation + for the same bytecode position. This means that the bytecode interpreter + has closed a loop and is interpreting already-seen code again. In this + situation, we interrupt the bytecode interpreter and we make a link from the + end of the current block back to the previous block, thus closing the loop + in the flow graph as well. (Note that this occurs only when an operation is + about to be recorded, which allows some amount of constant-folding.) + +* If the bytecode interpreter calls ``is_true()``, the abstract interpreter doesn't + generally know if the answer should be True or False, so it puts a + conditional jump and generates two successor blocks for the current basic + block. 
There is some trickery involved so that the bytecode interpreter is + fooled into thinking that ``is_true()`` first returns False (and the + subsequent operations are recorded in the first successor block), and later + the *same* call to ``is_true()`` also returns True (and the subsequent + operations go this time to the other successor block). + +(This section to be extended...) + + + +.. _flow-model: + +The Flow Model +-------------- + +Here we describe the data structures produced by ``build_flow()``, which are +the basic data structures of the translation process. + +All these types are defined in :source:`rpython/flowspace/model.py` (which is a +rather important module in the PyPy source base, to reinforce the point). + +The flow graph of a function is represented by the class ``FunctionGraph``. +It contains a reference to a collection of ``Block``\ s connected by ``Link``\ s. + +A ``Block`` contains a list of ``SpaceOperation``\ s. Each ``SpaceOperation`` +has an ``opname`` and a list of ``args`` and ``result``, which are either +``Variable``\ s or ``Constant``\ s. + +We have an extremely useful PyGame viewer, which allows you to visually +inspect the graphs at various stages of the translation process (very +useful to try to work out why things are breaking). It looks like this: + + .. image:: _static/bpnn_update.png + +It is recommended to play with ``python bin/translatorshell.py`` on a few +examples to get an idea of the structure of flow graphs. The following describes +the types and their attributes in some detail: + + +``FunctionGraph`` + A container for one graph (corresponding to one function). + + :startblock: the first block. It is where the control goes when the + function is called. The input arguments of the startblock + are the function's arguments. If the function takes a + ``*args`` argument, the ``args`` tuple is given as the last + input argument of the startblock. + + :returnblock: the (unique) block that performs a function return. 
It is + empty, not actually containing any ``return`` operation; the + return is implicit. The returned value is the unique input + variable of the returnblock. + + :exceptblock: the (unique) block that raises an exception out of the + function. The two input variables are the exception class + and the exception value, respectively. (No other block will + actually link to the exceptblock if the function does not + explicitly raise exceptions.) + + +``Block`` + A basic block, containing a list of operations and ending in jumps to other + basic blocks. All the values that are "live" during the execution of the + block are stored in Variables. Each basic block uses its own distinct + Variables. + + :inputargs: list of fresh, distinct Variables that represent all the + values that can enter this block from any of the previous + blocks. + + :operations: list of SpaceOperations. + :exitswitch: see below + + :exits: list of Links representing possible jumps from the end of this + basic block to the beginning of other basic blocks. + + Each Block ends in one of the following ways: + + * unconditional jump: exitswitch is None, exits contains a single Link. + + * conditional jump: exitswitch is one of the Variables that appear in the + Block, and exits contains one or more Links (usually 2). Each Link's + exitcase gives a concrete value. This is the equivalent of a "switch": + the control follows the Link whose exitcase matches the run-time value of + the exitswitch Variable. It is a run-time error if the Variable doesn't + match any exitcase. + + * exception catching: exitswitch is ``Constant(last_exception)``. The first + Link has exitcase set to None and represents the non-exceptional path. + The next Links have exitcase set to a subclass of Exception, and are taken + when the *last* operation of the basic block raises a matching exception. + (Thus the basic block must not be empty, and only the last operation is + protected by the handler.) 
+ + * return or except: the returnblock and the exceptblock have operations set + to an empty tuple, exitswitch to None, and exits empty. + + +``Link`` + A link from one basic block to another. + + :prevblock: the Block that this Link is an exit of. + + :target: the target Block to which this Link points to. + + :args: a list of Variables and Constants, of the same size as the + target Block's inputargs, which gives all the values passed + into the next block. (Note that each Variable used in the + prevblock may appear zero, one or more times in the ``args`` + list.) + + :exitcase: see above. + + :last_exception: None or a Variable; see below. + + :last_exc_value: None or a Variable; see below. + + Note that ``args`` uses Variables from the prevblock, which are matched to + the target block's ``inputargs`` by position, as in a tuple assignment or + function call would do. + + If the link is an exception-catching one, the ``last_exception`` and + ``last_exc_value`` are set to two fresh Variables that are considered to be + created when the link is entered; at run-time, they will hold the exception + class and value, respectively. These two new variables can only be used in + the same link's ``args`` list, to be passed to the next block (as usual, + they may actually not appear at all, or appear several times in ``args``). + + +``SpaceOperation`` + A recorded (or otherwise generated) basic operation. + + :opname: the name of the operation. ``build_flow()`` produces only operations + from the list in ``rpython.flowspace.operation``, but later the + names can be changed arbitrarily. + + :args: list of arguments. Each one is a Constant or a Variable seen + previously in the basic block. + + :result: a *new* Variable into which the result is to be stored. + + Note that operations usually cannot implicitly raise exceptions at run-time; + so for example, code generators can assume that a ``getitem`` operation on a + list is safe and can be performed without bound checking. 
The exceptions to + this rule are: (1) if the operation is the last in the block, which ends + with ``exitswitch == Constant(last_exception)``, then the implicit + exceptions must be checked for, generated, and caught appropriately; (2) + calls to other functions, as per ``simple_call`` or ``call_args``, can + always raise whatever the called function can raise --- and such exceptions + must be passed through to the parent unless they are caught as above. + + +``Variable`` + A placeholder for a run-time value. There is mostly debugging stuff here. + + :name: it is good style to use the Variable object itself instead of its + ``name`` attribute to reference a value, although the ``name`` is + guaranteed unique. + + +``Constant`` + A constant value used as argument to a SpaceOperation, or as value to pass + across a Link to initialize an input Variable in the target Block. + + :value: the concrete value represented by this Constant. + :key: a hashable object representing the value. + + A Constant can occasionally store a mutable Python object. It represents a + static, pre-initialized, read-only version of that object. The flow graph + should not attempt to actually mutate such Constants. .. 
_annotator: From noreply at buildbot.pypy.org Tue May 20 20:21:36 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 20 May 2014 20:21:36 +0200 (CEST) Subject: [pypy-commit] pypy default: SomeBuiltin analyzers can never have can_only_throw Message-ID: <20140520182136.2C5641C03C4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71620:801c1739e945 Date: 2014-05-20 19:20 +0100 http://bitbucket.org/pypy/pypy/changeset/801c1739e945/ Log: SomeBuiltin analyzers can never have can_only_throw diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -697,21 +697,28 @@ return s_next.call(bk.build_args("simple_call", [])) class __extend__(SomeBuiltin): + def simple_call(self, *args): + return self.analyser(*args) + + def call(self, args, implicit_init=False): + args_s, kwds = args.unpack() + # prefix keyword arguments with 's_' + kwds_s = {} + for key, s_value in kwds.items(): + kwds_s['s_'+key] = s_value + return self.analyser(*args_s, **kwds_s) + + +class __extend__(SomeBuiltinMethod): def _can_only_throw(self, *args): analyser_func = getattr(self.analyser, 'im_func', None) can_only_throw = getattr(analyser_func, 'can_only_throw', None) if can_only_throw is None or isinstance(can_only_throw, list): return can_only_throw - if self.s_self is not None: - return can_only_throw(self.s_self, *args) - else: - return can_only_throw(*args) + return can_only_throw(self.s_self, *args) def simple_call(self, *args): - if self.s_self is not None: - return self.analyser(self.s_self, *args) - else: - return self.analyser(*args) + return self.analyser(self.s_self, *args) simple_call.can_only_throw = _can_only_throw def call(self, args, implicit_init=False): @@ -720,10 +727,7 @@ kwds_s = {} for key, s_value in kwds.items(): kwds_s['s_'+key] = s_value - if self.s_self is not None: - return self.analyser(self.s_self, *args_s, **kwds_s) - else: - return self.analyser(*args_s, 
**kwds_s) + return self.analyser(self.s_self, *args_s, **kwds_s) class __extend__(SomePBC): From noreply at buildbot.pypy.org Wed May 21 00:23:51 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 May 2014 00:23:51 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fixes, and add HAVE_THREADS Message-ID: <20140520222351.BAA581C1106@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71621:61369511c423 Date: 2014-05-18 23:56 +0200 http://bitbucket.org/pypy/pypy/changeset/61369511c423/ Log: Fixes, and add HAVE_THREADS diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -19,8 +19,10 @@ 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', 'MAX_PREC': 'space.wrap(interp_decimal.MAX_PREC)', 'MAX_EMAX': 'space.wrap(interp_decimal.MAX_EMAX)', - 'MAX_EMIN': 'space.wrap(interp_decimal.MAX_EMIN)', - 'MAX_ETINY': 'space.wrap(interp_decimal.MAX_ETINY)', + 'MIN_EMIN': 'space.wrap(interp_decimal.MIN_EMIN)', + 'MIN_ETINY': 'space.wrap(interp_decimal.MIN_ETINY)', + + 'HAVE_THREADS': 'space.wrap(space.config.translation.thread)', } for name in rmpdec.ROUND_CONSTANTS: interpleveldefs[name] = 'space.wrap(%r)' % name diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -14,8 +14,8 @@ IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS MAX_PREC = rmpdec.MPD_MAX_PREC MAX_EMAX = rmpdec.MPD_MAX_EMAX -MAX_EMIN = rmpdec.MPD_MAX_EMIN -MAX_ETINY = rmpdec.MPD_MAX_ETINY +MIN_EMIN = rmpdec.MPD_MIN_EMIN +MIN_ETINY = rmpdec.MPD_MIN_ETINY # DEC_MINALLOC >= MPD_MINALLOC DEC_MINALLOC = 4 diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ 
b/pypy/module/_decimal/test/test_module.py @@ -48,3 +48,7 @@ assert issubclass(ex, _decimal.DecimalException) assert issubclass(ex, ArithmeticError) + def test_threads(self): + import _decimal + assert (_decimal.HAVE_THREADS is False or + _decimal.HAVE_THREADS is True) diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -91,8 +91,8 @@ 'MPD_IEEE_CONTEXT_MAX_BITS') MPD_MAX_PREC = platform.ConstantInteger('MPD_MAX_PREC') MPD_MAX_EMAX = platform.ConstantInteger('MPD_MAX_EMAX') - MPD_MAX_EMIN = platform.ConstantInteger('MPD_MAX_EMIN') - MPD_MAX_ETINY = platform.ConstantInteger('MPD_MAX_ETINY') + MPD_MIN_EMIN = platform.ConstantInteger('MPD_MIN_EMIN') + MPD_MIN_ETINY = platform.ConstantInteger('MPD_MIN_ETINY') MPD_MAX_SIGNAL_LIST = platform.ConstantInteger('MPD_MAX_SIGNAL_LIST') MPD_SIZE_MAX = platform.ConstantInteger('MPD_SIZE_MAX') MPD_SSIZE_MAX = platform.ConstantInteger('MPD_SSIZE_MAX') From noreply at buildbot.pypy.org Wed May 21 00:23:53 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 May 2014 00:23:53 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Decimal.to_integral*() Message-ID: <20140520222353.4F4911C1106@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71622:7b1c4d62bf95 Date: 2014-05-19 00:20 +0200 http://bitbucket.org/pypy/pypy/changeset/7b1c4d62bf95/ Log: Add Decimal.to_integral*() diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -313,6 +313,35 @@ status_ptr) return w_result + # Unary arithmetic functions, optional context arg + + def to_integral_w(self, space, w_rounding=None, w_context=None): + context = interp_context.ensure_context(space, w_context) + w_workctx = context.copy_w(space) + if not space.is_none(w_rounding): + w_workctx.set_rounding(space, w_rounding) + 
w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + # We round with the temporary context, but set status and + # raise errors on the global one. + rmpdec.mpd_qround_to_int(w_result.mpd, self.mpd, + w_workctx.ctx, status_ptr) + return w_result + + def to_integral_exact_w(self, space, w_rounding=None, w_context=None): + context = interp_context.ensure_context(space, w_context) + w_workctx = context.copy_w(space) + if not space.is_none(w_rounding): + w_workctx.set_rounding(space, w_rounding) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + # We round with the temporary context, but set status and + # raise errors on the global one. + rmpdec.mpd_qround_to_intx(w_result.mpd, self.mpd, + w_workctx.ctx, status_ptr) + return w_result + + # Boolean functions def is_qnan_w(self, space): return space.wrap(bool(rmpdec.mpd_isqnan(self.mpd))) @@ -690,6 +719,10 @@ __rmod__ = interp2app(W_Decimal.descr_rmod), __rdivmod__ = interp2app(W_Decimal.descr_rdivmod), __rpow__ = interp2app(W_Decimal.descr_rpow), + # Unary arithmetic functions, optional context arg + to_integral = interp2app(W_Decimal.to_integral_w), + to_integral_value = interp2app(W_Decimal.to_integral_w), + to_integral_exact = interp2app(W_Decimal.to_integral_exact_w), # copy_sign = interp2app(W_Decimal.copy_sign_w), is_qnan = interp2app(W_Decimal.is_qnan_w), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -758,3 +758,35 @@ d = Decimal( (1, (0, 2, 7, 1), 'F') ) assert d.as_tuple() == (1, (0,), 'F') + def test_c_integral(self): + Decimal = self.decimal.Decimal + Inexact = self.decimal.Inexact + localcontext = self.decimal.localcontext + ROUND_UP = self.decimal.ROUND_UP + + x = Decimal(10) + assert x.to_integral() == 10 + raises(TypeError, x.to_integral, '10') + raises(TypeError, 
x.to_integral, 10, 'x') + raises(TypeError, x.to_integral, 10) + + assert x.to_integral_value() == 10 + raises(TypeError, x.to_integral_value, '10') + raises(TypeError, x.to_integral_value, 10, 'x') + raises(TypeError, x.to_integral_value, 10) + + assert x.to_integral_exact() == 10 + raises(TypeError, x.to_integral_exact, '10') + raises(TypeError, x.to_integral_exact, 10, 'x') + raises(TypeError, x.to_integral_exact, 10) + + with localcontext() as c: + x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP) + assert x == Decimal('100000000000000000000000000') + + x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP) + assert x == Decimal('100000000000000000000000000') + + c.traps[Inexact] = True + raises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP) + From noreply at buildbot.pypy.org Wed May 21 00:23:54 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 May 2014 00:23:54 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Fix context flags dictionary. Message-ID: <20140520222354.98C3A1C1106@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71623:c15880925b71 Date: 2014-05-19 22:18 +0200 http://bitbucket.org/pypy/pypy/changeset/c15880925b71/ Log: Fix context flags dictionary. diff --git a/lib-python/3/test/test_decimal.py b/lib-python/3/test/test_decimal.py --- a/lib-python/3/test/test_decimal.py +++ b/lib-python/3/test/test_decimal.py @@ -47,6 +47,7 @@ C = import_fresh_module('decimal', fresh=['_decimal']) P = import_fresh_module('decimal', blocked=['_decimal']) +C.MAX_EMAX = P.MAX_EMAX orig_sys_decimal = sys.modules['decimal'] # fractions module must import the correct decimal module. 
diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -15,6 +15,7 @@ 'getcontext': 'interp_context.getcontext', 'setcontext': 'interp_context.setcontext', 'DecimalException': 'interp_signals.get(space).w_DecimalException', + 'SignalTuple': 'interp_signals.get(space).w_SignalTuple', 'IEEE_CONTEXT_MAX_BITS': 'space.wrap(interp_decimal.IEEE_CONTEXT_MAX_BITS)', 'MAX_PREC': 'space.wrap(interp_decimal.MAX_PREC)', diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -32,6 +32,13 @@ else: self.flag_ptr[0] = rffi.cast(rffi.UINT, cur_flag & ~flag) + def descr_delitem(self, space, w_key): + raise oefmt(space.w_ValueError, + "signal keys cannot be deleted") + + def descr_iter(self, space): + return space.iter(interp_signals.get(space).w_SignalTuple) + def new_signal_dict(space, flag_ptr): w_dict = space.allocate_instance(W_SignalDictMixin, @@ -44,6 +51,8 @@ 'SignalDictMixin', __getitem__ = interp2app(W_SignalDictMixin.descr_getitem), __setitem__ = interp2app(W_SignalDictMixin.descr_setitem), + __delitem__ = interp2app(W_SignalDictMixin.descr_delitem), + __iter__ = interp2app(W_SignalDictMixin.descr_iter), ) W_SignalDictMixin.typedef.acceptable_as_base_class = True diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -117,5 +117,9 @@ space.w_TypeError]), space.newdict()) + self.w_SignalTuple = space.newtuple([ + getattr(self, 'w_' + name) + for name, flag in SIGNAL_MAP]) + def get(space): return space.fromcache(SignalState) diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py 
@@ -25,6 +25,9 @@ bases = type(flags).__bases__ assert bases[1] is MutableMapping + assert _decimal.Inexact in flags + assert _decimal.Inexact in flags.keys() + def test_context_changes(self): import _decimal context = _decimal.getcontext() From noreply at buildbot.pypy.org Wed May 21 00:23:56 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 21 May 2014 00:23:56 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Implement Decimal.__hash__, and improve comparison with float. Message-ID: <20140520222356.1BB9C1C1106@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r71624:7d4a82d375d2 Date: 2014-05-21 00:22 +0200 http://bitbucket.org/pypy/pypy/changeset/7d4a82d375d2/ Log: Implement Decimal.__hash__, and improve comparison with float. diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -8,8 +8,17 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, descr_set_dict, descr_del_dict) from pypy.objspace.std import unicodeobject +from pypy.objspace.std.floatobject import HASH_MODULUS, HASH_INF, HASH_NAN from pypy.module._decimal import interp_context +if HASH_MODULUS == 2**31 - 1: + INVERSE_10_MODULUS = 1503238553 +elif HASH_MODULUS == 2**61 - 1: + INVERSE_10_MODULUS = 2075258708292324556 +else: + raise NotImplementedError('Unsupported HASH_MODULUS') +assert (INVERSE_10_MODULUS * 10) % HASH_MODULUS == 1 + IEEE_CONTEXT_MAX_BITS = rmpdec.MPD_IEEE_CONTEXT_MAX_BITS MAX_PREC = rmpdec.MPD_MAX_PREC @@ -82,6 +91,78 @@ rmpdec.mpd_free(cp) return space.wrap("Decimal('%s')" % result) + def descr_hash(self, space): + if rmpdec.mpd_isspecial(self.mpd): + if rmpdec.mpd_issnan(self.mpd): + raise oefmt(space.w_TypeError, + "cannot hash a signaling NaN value") + elif rmpdec.mpd_isnan(self.mpd): + return space.wrap(HASH_NAN) + elif rmpdec.mpd_isnegative(self.mpd): + 
return space.wrap(-HASH_INF) + else: + return space.wrap(HASH_INF) + + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1, + zero=True) as status_ptr: + with lltype.scoped_alloc(rmpdec.MPD_CONTEXT_PTR.TO) as ctx: + rmpdec.mpd_maxcontext(ctx) + + # XXX cache these + w_p = W_Decimal.allocate(space) + rmpdec.mpd_qset_ssize(w_p.mpd, HASH_MODULUS, + ctx, status_ptr) + w_ten = W_Decimal.allocate(space) + rmpdec.mpd_qset_ssize(w_ten.mpd, 10, + ctx, status_ptr) + w_inv10_p = W_Decimal.allocate(space) + rmpdec.mpd_qset_ssize(w_inv10_p.mpd, INVERSE_10_MODULUS, + ctx, status_ptr) + + + w_exp_hash = W_Decimal.allocate(space) + w_tmp = W_Decimal.allocate(space) + exp = self.mpd.c_exp + if exp >= 0: + # 10**exp(v) % p + rmpdec.mpd_qsset_ssize(w_tmp.mpd, exp, ctx, status_ptr) + rmpdec.mpd_qpowmod( + w_exp_hash.mpd, w_ten.mpd, w_tmp.mpd, w_p.mpd, + ctx, status_ptr) + else: + # inv10_p**(-exp(v)) % p + rmpdec.mpd_qsset_ssize(w_tmp.mpd, -exp, ctx, status_ptr) + rmpdec.mpd_qpowmod( + w_exp_hash.mpd, w_inv10_p.mpd, w_tmp.mpd, w_p.mpd, + ctx, status_ptr) + # hash = (int(v) * exp_hash) % p + rmpdec.mpd_qcopy(w_tmp.mpd, self.mpd, status_ptr) + w_tmp.mpd.c_exp = 0 + rmpdec.mpd_set_positive(w_tmp.mpd) + + ctx.c_prec = rmpdec.MPD_MAX_PREC + 21 + ctx.c_emax = rmpdec.MPD_MAX_EMAX + 21 + ctx.c_emin = rmpdec.MPD_MIN_EMIN - 21 + + rmpdec.mpd_qmul(w_tmp.mpd, w_tmp.mpd, w_exp_hash.mpd, + ctx, status_ptr) + rmpdec.mpd_qrem(w_tmp.mpd, w_tmp.mpd, w_p.mpd, + ctx, status_ptr) + + result = rmpdec.mpd_qget_ssize(w_tmp.mpd, status_ptr); + if rmpdec.mpd_isnegative(self.mpd): + result = -result + if result == -1: + result = -2 + status = rffi.cast(lltype.Signed, status_ptr[0]) + if status: + if status & rmpdec.MPD_Malloc_error: + raise OperationError(space.w_MemoryError, space.w_None) + else: + raise OperationError(space.w_SystemError, space.wrap( + "Decimal.__hash__ internal error; please report")) + return space.wrap(result) + def descr_bool(self, space): return space.wrap(not 
rmpdec.mpd_iszero(self.mpd)) @@ -166,16 +247,19 @@ def compare(self, space, w_other, op): context = interp_context.getcontext(space) - w_err, w_other = convert_op(space, context, w_other) + w_err, w_self, w_other = convert_binop_cmp( + space, context, op, self, w_other) if w_err: return w_err - with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1) as status_ptr: - r = rmpdec.mpd_qcmp(self.mpd, w_other.mpd, status_ptr) + with lltype.scoped_alloc(rffi.CArrayPtr(rffi.UINT).TO, 1, + zero=True) as status_ptr: + r = rmpdec.mpd_qcmp(w_self.mpd, w_other.mpd, status_ptr) if r > 0xFFFF: # sNaNs or op={le,ge,lt,gt} always signal. - if (rmpdec.mpd_issnan(self.mpd) or rmpdec.mpd_issnan(w_other.mpd) - or (op not in ('eq', 'ne'))): + if (rmpdec.mpd_issnan(w_self.mpd) or + rmpdec.mpd_issnan(w_other.mpd) or + op not in ('eq', 'ne')): status = rffi.cast(lltype.Signed, status_ptr[0]) context.addstatus(space, status) # qNaN comparison with op={eq,ne} or comparison with @@ -436,6 +520,41 @@ space.type(w_y)) return w_a, w_b +def convert_binop_cmp(space, context, op, w_v, w_w): + if isinstance(w_w, W_Decimal): + return None, w_v, w_w + elif space.isinstance_w(w_w, space.w_int): + value = space.bigint_w(w_w) + w_w = decimal_from_bigint(space, None, value, context, + exact=True) + return None, w_v, w_w + elif space.isinstance_w(w_w, space.w_float): + if op not in ('eq', 'ne'): + # Add status, and maybe raise + context.addstatus(space, rmpdec.MPD_Float_operation) + else: + # Add status, but don't raise + new_status = (rmpdec.MPD_Float_operation | + rffi.cast(lltype.Signed, context.ctx.c_status)) + context.ctx.c_status = rffi.cast(rffi.UINT, new_status) + w_w = decimal_from_float(space, None, w_w, context, exact=True) + elif space.isinstance_w(w_w, space.w_complex): + if op not in ('eq', 'ne'): + return space.w_NotImplemented, None, None + real, imag = space.unpackcomplex(w_w) + if imag == 0.0: + # Add status, but don't raise + new_status = (rmpdec.MPD_Float_operation | + 
rffi.cast(lltype.Signed, context.ctx.c_status)) + context.ctx.c_status = rffi.cast(rffi.UINT, new_status) + w_w = decimal_from_float(space, None, w_w, context, exact=True) + else: + return space.w_NotImplemented, None, None + else: + return space.w_NotImplemented, None, None + return None, w_v, w_w + + def binary_number_method(space, mpd_func, w_x, w_y): context = interp_context.getcontext(space) @@ -684,6 +803,7 @@ __new__ = interp2app(descr_new_decimal), __str__ = interp2app(W_Decimal.descr_str), __repr__ = interp2app(W_Decimal.descr_repr), + __hash__ = interp2app(W_Decimal.descr_hash), __bool__ = interp2app(W_Decimal.descr_bool), __float__ = interp2app(W_Decimal.descr_float), __int__ = interp2app(W_Decimal.descr_int), diff --git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -644,6 +644,180 @@ assert -Decimal(45) == Decimal(-45) assert abs(Decimal(45)) == abs(Decimal(-45)) + def test_hash_method(self): + + Decimal = self.decimal.Decimal + localcontext = self.decimal.localcontext + + def hashit(d): + a = hash(d) + b = d.__hash__() + assert a == b + return a + + #just that it's hashable + hashit(Decimal(23)) + hashit(Decimal('Infinity')) + hashit(Decimal('-Infinity')) + hashit(Decimal('nan123')) + hashit(Decimal('-NaN')) + + test_values = [Decimal(sign*(2**m + n)) + for m in [0, 14, 15, 16, 17, 30, 31, + 32, 33, 61, 62, 63, 64, 65, 66] + for n in range(-10, 10) + for sign in [-1, 1]] + test_values.extend([ + Decimal("-1"), # ==> -2 + Decimal("-0"), # zeros + Decimal("0.00"), + Decimal("-0.000"), + Decimal("0E10"), + Decimal("-0E12"), + Decimal("10.0"), # negative exponent + Decimal("-23.00000"), + Decimal("1230E100"), # positive exponent + Decimal("-4.5678E50"), + # a value for which hash(n) != hash(n % (2**64-1)) + # in Python pre-2.6 + Decimal(2**64 + 2**32 - 1), + # selection of values which fail with the old (before + # 
version 2.6) long.__hash__ + Decimal("1.634E100"), + Decimal("90.697E100"), + Decimal("188.83E100"), + Decimal("1652.9E100"), + Decimal("56531E100"), + ]) + + # check that hash(d) == hash(int(d)) for integral values + for value in test_values: + assert hashit(value) == hashit(int(value)) + + #the same hash that to an int + assert hashit(Decimal(23)) == hashit(23) + raises(TypeError, hash, Decimal('sNaN')) + assert hashit(Decimal('Inf')) + assert hashit(Decimal('-Inf')) + + # check that the hashes of a Decimal float match when they + # represent exactly the same values + test_strings = ['inf', '-Inf', '0.0', '-.0e1', + '34.0', '2.5', '112390.625', '-0.515625'] + for s in test_strings: + f = float(s) + d = Decimal(s) + assert hashit(f) == hashit(d) + + with localcontext() as c: + # check that the value of the hash doesn't depend on the + # current context (issue #1757) + x = Decimal("123456789.1") + + c.prec = 6 + h1 = hashit(x) + c.prec = 10 + h2 = hashit(x) + c.prec = 16 + h3 = hashit(x) + + assert h1 == h2 == h3 + + c.prec = 10000 + x = 1100 ** 1248 + assert hashit(Decimal(x)) == hashit(x) + + def test_float_comparison(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + FloatOperation = self.decimal.FloatOperation + localcontext = self.decimal.localcontext + + def assert_attr(a, b, attr, context, signal=None): + context.clear_flags() + f = getattr(a, attr) + if signal == FloatOperation: + raises(signal, f, b) + else: + assert f(b) is True + assert context.flags[FloatOperation] + + small_d = Decimal('0.25') + big_d = Decimal('3.0') + small_f = 0.25 + big_f = 3.0 + + zero_d = Decimal('0.0') + neg_zero_d = Decimal('-0.0') + zero_f = 0.0 + neg_zero_f = -0.0 + + inf_d = Decimal('Infinity') + neg_inf_d = Decimal('-Infinity') + inf_f = float('inf') + neg_inf_f = float('-inf') + + def doit(c, signal=None): + # Order + for attr in '__lt__', '__le__': + assert_attr(small_d, big_f, attr, c, signal) + + for attr in '__gt__', '__ge__': + 
assert_attr(big_d, small_f, attr, c, signal) + + # Equality + assert_attr(small_d, small_f, '__eq__', c, None) + + assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None) + assert_attr(neg_zero_d, zero_f, '__eq__', c, None) + + assert_attr(zero_d, neg_zero_f, '__eq__', c, None) + assert_attr(zero_d, zero_f, '__eq__', c, None) + + assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None) + assert_attr(inf_d, inf_f, '__eq__', c, None) + + # Inequality + assert_attr(small_d, big_f, '__ne__', c, None) + + assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None) + + assert_attr(neg_inf_d, inf_f, '__ne__', c, None) + assert_attr(inf_d, neg_inf_f, '__ne__', c, None) + + assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None) + + def test_containers(c, signal=None): + c.clear_flags() + s = set([100.0, Decimal('100.0')]) + assert len(s) == 1 + assert c.flags[FloatOperation] + + c.clear_flags() + if signal: + raises(signal, sorted, [1.0, Decimal('10.0')]) + else: + s = sorted([10.0, Decimal('10.0')]) + assert c.flags[FloatOperation] + + c.clear_flags() + b = 10.0 in [Decimal('10.0'), 1.0] + assert c.flags[FloatOperation] + + c.clear_flags() + b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'} + assert c.flags[FloatOperation] + + nc = Context() + with localcontext(nc) as c: + assert not c.traps[FloatOperation] + doit(c, signal=None) + test_containers(c, signal=None) + + c.traps[FloatOperation] = True + doit(c, signal=FloatOperation) + test_containers(c, signal=FloatOperation) + def test_nan_comparisons(self): import operator # comparisons involving signaling nans signal InvalidOperation diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -37,9 +37,10 @@ ], export_symbols=[ "mpd_qset_ssize", "mpd_qset_uint", "mpd_qset_string", + "mpd_qsset_ssize", "mpd_qget_ssize", "mpd_qcopy", "mpd_qncopy", "mpd_setspecial", "mpd_clear_flags", "mpd_qimport_u32", "mpd_qexport_u32", "mpd_qexport_u16", - "mpd_set_sign", "mpd_sign", 
"mpd_qfinalize", + "mpd_set_sign", "mpd_set_positive", "mpd_sign", "mpd_qfinalize", "mpd_getprec", "mpd_getemin", "mpd_getemax", "mpd_getround", "mpd_getclamp", "mpd_qsetprec", "mpd_qsetemin", "mpd_qsetemax", "mpd_qsetround", "mpd_qsetclamp", "mpd_maxcontext", @@ -151,6 +152,10 @@ 'mpd_qset_uint', [MPD_PTR, rffi.UINT, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qset_string = external( 'mpd_qset_string', [MPD_PTR, rffi.CCHARP, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qsset_ssize = external( + 'mpd_qsset_ssize', [MPD_PTR, rffi.SSIZE_T, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qget_ssize = external( + 'mpd_qget_ssize', [MPD_PTR, rffi.UINTP], rffi.SSIZE_T) mpd_qimport_u32 = external( 'mpd_qimport_u32', [ MPD_PTR, rffi.UINTP, rffi.SIZE_T, @@ -171,6 +176,8 @@ 'mpd_setspecial', [MPD_PTR, rffi.UCHAR, rffi.UCHAR], lltype.Void) mpd_set_sign = external( 'mpd_set_sign', [MPD_PTR, rffi.UCHAR], lltype.Void) +mpd_set_positive = external( + 'mpd_set_positive', [MPD_PTR], lltype.Void) mpd_clear_flags = external( 'mpd_clear_flags', [MPD_PTR], lltype.Void) mpd_sign = external( From noreply at buildbot.pypy.org Wed May 21 01:30:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 21 May 2014 01:30:13 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140520233013.BC2311C1106@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71625:eb55b133230a Date: 2014-05-20 15:39 -0700 http://bitbucket.org/pypy/pypy/changeset/eb55b133230a/ Log: 2to3 diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -149,7 +149,7 @@ self._check_closed() lib.gdbm_sync(self.ll_dbm) -def open(filename, flags='r', mode=0666): +def open(filename, flags='r', mode=0o666): if flags[0] == 'r': iflags = lib.GDBM_READER elif flags[0] == 'w': diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ 
b/pypy/module/fcntl/test/test_fcntl.py @@ -270,7 +270,7 @@ try: if termios.TIOCSWINSZ < 0: set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ - set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffff else: set_winsz_opcode_pos = termios.TIOCSWINSZ set_winsz_opcode_maybe_neg, = struct.unpack("i", From noreply at buildbot.pypy.org Wed May 21 01:42:25 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 May 2014 01:42:25 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: initial checkin for my talk Message-ID: <20140520234225.CCA0D1C0299@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5255:e5afa1fc9e45 Date: 2014-05-20 17:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/e5afa1fc9e45/ Log: initial checkin for my talk diff --git a/talk/rst2beamer-template/Makefile b/talk/pycon-italy-2014/Makefile copy from talk/rst2beamer-template/Makefile copy to talk/pycon-italy-2014/Makefile diff --git a/talk/rst2beamer-template/author.latex b/talk/pycon-italy-2014/author.latex copy from talk/rst2beamer-template/author.latex copy to talk/pycon-italy-2014/author.latex --- a/talk/rst2beamer-template/author.latex +++ b/talk/pycon-italy-2014/author.latex @@ -1,8 +1,8 @@ \definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} -\title[PyPy: becoming fast]{PyPy: becoming fast} -\author[antocuni, cfbolz, pedronis] -{Antonio Cuni \\ Carl Friedrich Bolz\\ Samuele Pedroni} +\title[PyPy Status]{PyPy Status} +\author[antocuni] +{Antonio Cuni} -\institute{EuroPython 2009} -\date{June 30 2009} +\institute{PyCon Cinque} +\date{May 24 2014} diff --git a/talk/rst2beamer-template/beamerdefs.txt b/talk/pycon-italy-2014/beamerdefs.txt copy from talk/rst2beamer-template/beamerdefs.txt copy to talk/pycon-italy-2014/beamerdefs.txt diff --git a/talk/rst2beamer-template/stylesheet.latex b/talk/pycon-italy-2014/stylesheet.latex copy from talk/rst2beamer-template/stylesheet.latex copy to 
talk/pycon-italy-2014/stylesheet.latex diff --git a/talk/rst2beamer-template/talk.pdf.info b/talk/pycon-italy-2014/talk.pdf.info copy from talk/rst2beamer-template/talk.pdf.info copy to talk/pycon-italy-2014/talk.pdf.info diff --git a/talk/rst2beamer-template/talk.txt b/talk/pycon-italy-2014/talk.txt copy from talk/rst2beamer-template/talk.txt copy to talk/pycon-italy-2014/talk.txt diff --git a/talk/rst2beamer-template/title.latex b/talk/pycon-italy-2014/title.latex copy from talk/rst2beamer-template/title.latex copy to talk/pycon-italy-2014/title.latex From noreply at buildbot.pypy.org Wed May 21 01:42:27 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 May 2014 01:42:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: title slide Message-ID: <20140520234227.07B291C0299@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5256:d982f5e27f71 Date: 2014-05-20 17:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/d982f5e27f71/ Log: title slide diff --git a/talk/pycon-italy-2014/Makefile b/talk/pycon-italy-2014/Makefile --- a/talk/pycon-italy-2014/Makefile +++ b/talk/pycon-italy-2014/Makefile @@ -5,9 +5,9 @@ # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 talk.pdf: talk.txt author.latex title.latex stylesheet.latex - rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt talk.txt talk.latex || exit + /home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.txt talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit - sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit pdflatex talk.latex || exit view: talk.pdf diff --git a/talk/pycon-italy-2014/author.latex b/talk/pycon-italy-2014/author.latex --- a/talk/pycon-italy-2014/author.latex +++ b/talk/pycon-italy-2014/author.latex @@ -1,8 +1,8 @@ 
\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} -\title[PyPy Status]{PyPy Status} +\title[PyPy Status]{PyPy Status\\\small{(no, PyPy is not dead)}} \author[antocuni] {Antonio Cuni} \institute{PyCon Cinque} -\date{May 24 2014} +\date{May 24, 2014} From noreply at buildbot.pypy.org Wed May 21 01:42:28 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 May 2014 01:42:28 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <20140520234228.2D3521C0299@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5257:13d1a92ab9dd Date: 2014-05-21 01:32 +0200 http://bitbucket.org/pypy/extradoc/changeset/13d1a92ab9dd/ Log: more slides diff --git a/talk/pycon-italy-2014/Makefile b/talk/pycon-italy-2014/Makefile --- a/talk/pycon-italy-2014/Makefile +++ b/talk/pycon-italy-2014/Makefile @@ -4,8 +4,8 @@ # WARNING: to work, it needs this patch for docutils # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 -talk.pdf: talk.txt author.latex title.latex stylesheet.latex - /home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.txt talk.latex || exit +talk.pdf: talk.rst author.latex title.latex stylesheet.latex + /home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit pdflatex talk.latex || exit diff --git a/talk/pycon-italy-2014/talk.pdf.info b/talk/pycon-italy-2014/talk.pdf.info --- a/talk/pycon-italy-2014/talk.pdf.info +++ b/talk/pycon-italy-2014/talk.pdf.info @@ -1,6 +1,6 @@ AvailableTransitions=[Crossfade] TransitionDuration = 100 -EstimatedDuration = 60*60 # in seconds +EstimatedDuration = 45*60 # in seconds MinutesOnly = True PageProps = { diff --git a/talk/pycon-italy-2014/talk.txt 
b/talk/pycon-italy-2014/talk.rst rename from talk/pycon-italy-2014/talk.txt rename to talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.txt +++ b/talk/pycon-italy-2014/talk.rst @@ -1,7 +1,118 @@ .. include:: beamerdefs.txt ================================ -Title +PyPy Status ================================ -XXX +About me +--------- + +- PyPy core dev + +- ``pdb++``, ``fancycompleter``, ... + +- Consultant, trainer + +- http://antocuni.eu + + +PyPy is not dead +---------------- + +- No PyPy status talk at EuroPython 2013 + + * for the first time since 2004! + + * for no good reason :) + +- PyPy is healthy and alive + + +What is PyPy? +-------------- + +* RPython toolchain + + - subset of Python + + - ideal for writing VMs + + - JIT & GC for free + +* Python interpreter + + - written in RPython + + - **FAST** + +* Whatever (dynamic) language you want + + - smalltalk, prolog, PHP, javascript, ... + + +PyPy: past two years (1) +----------------------------- + +- PyPy 2.0 (May 2013) + + * beta ARM, CFFI, unicode performance + + * stackless + JIT (eventlet, gevent, ...) 
+ +|pause| + +- PyPy 2.1 (July 2013) + + * stable ARM (thanks to Raspberry Pi foundation) + + * py3k (3.2.3), numpy, general improvements, bugfixes + +|pause| + +- PyPy 2.2 (November 2013) + + * incremental GC, faster JSON + + * more JIT, more py3k + + * more numpy, numpy C API + +PyPy: past two years (2) +------------------------- + +- PyPy 2.3 (May 2014) + +- Lot of internal refactoring + +- C API for embedding + + * pypy + uWSGI (thanks to Roberto De Ioris) + +- the usual, boring, general improvements + + +More PyPy-powered languages +---------------------------- + +- RPython: general framework for dynamic languages + +- Topaz: implementing Ruby + + * https://github.com/topazproject/topaz + +- HippyVM: implementing PHP + + * http://hippyvm.com/ + + +Current status: speed +--------------------- + + + + + + +- future: STM + +- Q&A From noreply at buildbot.pypy.org Wed May 21 01:42:40 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 May 2014 01:42:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <20140520234240.3B5C31C0299@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5258:4922c6c8f204 Date: 2014-05-21 01:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/4922c6c8f204/ Log: more slides diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -105,13 +105,39 @@ * http://hippyvm.com/ -Current status: speed ---------------------- +Current status +--------------- +- Python code: "it just works" +- C code: better than ever! + * cpyext: more complete, but still slow + * CFFI: the future + * Native PyPy C API for embedding + +- Lots of CFFI modules around: + + * pygame_cffi, psycopg2_cffi, lxml (in-progress) + +- numpy: in-progress, tons of code works out of the box + + * no scipy yet :-/ + + +Speed: 6.3x faster than CPython +-------------------------------- + +.. 
image:: speed.png + :scale: 47% + + + + +xxx +---- - future: STM From noreply at buildbot.pypy.org Wed May 21 05:08:41 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 21 May 2014 05:08:41 +0200 (CEST) Subject: [pypy-commit] pypy default: don't require a bookkeeper to create an ArgumentsForTranslation object Message-ID: <20140521030841.C09E71C3331@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71626:48e079c6da38 Date: 2014-05-21 04:07 +0100 http://bitbucket.org/pypy/pypy/changeset/48e079c6da38/ Log: don't require a bookkeeper to create an ArgumentsForTranslation object diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -10,6 +10,7 @@ c_last_exception, checkgraph) from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature +from rpython.annotator.argument import simple_args from rpython.annotator.bookkeeper import Bookkeeper import py @@ -91,7 +92,7 @@ def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) - args = self.bookkeeper.build_args("simple_call", args_s[:]) + args = simple_args(args_s) result = [] def schedule(graph, inputcells): result.append((graph, inputcells)) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -171,6 +171,12 @@ def rawshape(args): return args._rawshape() +def simple_args(args_s): + return ArgumentsForTranslation(list(args_s)) + +def complex_args(args_s): + return ArgumentsForTranslation.fromshape(args_s[0].const, + list(args_s[1:])) # # ArgErr family of exceptions raised in case of argument mismatch. 
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -18,7 +18,7 @@ from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import ArgumentsForTranslation +from rpython.annotator.argument import simple_args, complex_args from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -538,7 +538,7 @@ del emulated_pbc_calls[other_key] emulated_pbc_calls[unique_key] = pbc, args_s - args = self.build_args("simple_call", args_s) + args = simple_args(args_s) if callback is None: emulated = True else: @@ -564,11 +564,9 @@ def build_args(self, op, args_s): if op == "simple_call": - return ArgumentsForTranslation(list(args_s)) + return simple_args(args_s) elif op == "call_args": - return ArgumentsForTranslation.fromshape( - args_s[0].const, # shape - list(args_s[1:])) + return complex_args(args_s) def ondegenerated(self, what, s_value, where=None, called_from_graph=None): self.annotator.ondegenerated(what, s_value, where=where, diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -15,6 +15,7 @@ from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? 
from rpython.annotator.model import AnnotatorError +from rpython.annotator.argument import simple_args, complex_args UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) @@ -133,10 +134,10 @@ return self # default unbound __get__ implementation def simple_call(self, *args_s): - return self.call(getbookkeeper().build_args("simple_call", args_s)) + return self.call(simple_args(args_s)) def call_args(self, *args_s): - return self.call(getbookkeeper().build_args("call_args", args_s)) + return self.call(complex_args(args_s)) def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") @@ -687,14 +688,14 @@ bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_iterable, []) - return s_iterable.call(bk.build_args("simple_call", [])) + return s_iterable.call(simple_args([])) def next(self): s_next = self._true_getattr('next') bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_next, []) - return s_next.call(bk.build_args("simple_call", [])) + return s_next.call(simple_args([])) class __extend__(SomeBuiltin): def simple_call(self, *args): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -2,6 +2,7 @@ from rpython.annotator import model as annmodel, description from rpython.flowspace.model import Constant +from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError @@ -290,7 +291,7 @@ bk = self.rtyper.annotator.bookkeeper descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site - args = bk.build_args("simple_call", args_s) + args = simple_args(args_s) shape, index = vfcs(bk, self.callfamily, descs, args, op) funcdesc, = descs row_of_one_graph = self.callfamily.calltables[shape][index] From noreply at 
buildbot.pypy.org Wed May 21 09:32:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 May 2014 09:32:01 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Yet a different approach Message-ID: <20140521073201.A38B61C1106@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71627:fb7560c4e5f5 Date: 2014-05-20 18:08 +0200 http://bitbucket.org/pypy/pypy/changeset/fb7560c4e5f5/ Log: Yet a different approach diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -31,35 +31,19 @@ del self._ss_graph_marker del self._transforming_graph - def sanitize_graph(self, graph): - SSA_to_SSI(graph, self.translator.annotator) - - def ensure_ss_graph_marker(self): - if self._ss_graph_marker is None: - graph = self._transforming_graph - inputargs = [copyvar(self.translator.annotator, v) - for v in graph.startblock.inputargs] - hblock = Block(inputargs) - v_marker = varoftype(self.RPY_SHADOWSTACK_PTR) - hblock.operations.append(SpaceOperation('gc_ss_graph_marker', - [], v_marker)) - hblock.closeblock(Link(inputargs, graph.startblock)) - graph.startblock = hblock - self._ss_graph_marker = v_marker - return self._ss_graph_marker - def push_roots(self, hop, keep_current_args=False): livevars = self.get_livevars_for_roots(hop, keep_current_args) + if not livevars: + return [] self.num_pushs += len(livevars) - v_marker = self.ensure_ss_graph_marker() - hop.genop("gc_ss_store", [v_marker] + livevars) + hop.genop("gc_ss_store", livevars) return livevars def pop_roots(self, hop, livevars): # for moving collectors, reload the roots into the local variables - if self.gcdata.gc.moving_gc and livevars: - v_marker = self.ensure_ss_graph_marker() - hop.genop("gc_ss_reload", [v_marker] + livevars) + assert self.gcdata.gc.moving_gc, "XXX" + if livevars: + hop.genop("gc_ss_reload", livevars) class 
ShadowStackRootWalker(BaseRootWalker): diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -443,31 +443,26 @@ from rpython.memory.gctransform import shadowstack return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) - def OP_GC_SS_GRAPH_MARKER(self, funcgen, op): - return '%s = rpy_shadowstack;' % funcgen.expr(op.result) - def OP_GC_SS_STORE(self, funcgen, op): - marker = funcgen.expr(op.args[0]) lines = [] - for i, v in enumerate(op.args[1:]): - lines.append('%s[%d].s = %s;' % (marker, i, funcgen.expr(v))) - lines.append('rpy_shadowstack = %s + %d;' % (marker, len(op.args) - 1)) + for i, v in enumerate(op.args): + lines.append('rpy_shadowstack[%d].s = %s;' % (i, funcgen.expr(v))) + lines.append('rpy_shadowstack += %d;' % len(op.args)) return '\n'.join(lines) def OP_GC_SS_RELOAD(self, funcgen, op): - marker = funcgen.expr(op.args[0]) - lines = [] - for i, v in enumerate(op.args[1:]): + revlines = [] + for i, v in enumerate(op.args): typename = funcgen.db.gettype(v.concretetype) - lines.append('%s = (%s)%s[%d].s;' % ( + revlines.append('%s = (%s)rpy_shadowstack[%d].s;' % ( funcgen.expr(v), cdecl(typename, ''), - marker, i)) if isinstance(v, Constant): - lines[-1] = '/* %s */' % lines[-1] - lines.reverse() - return '\n'.join(lines) + revlines[-1] = '/* %s */' % revlines[-1] + revlines.append('rpy_shadowstack -= %d;' % len(op.args)) + revlines.reverse() + return '\n'.join(revlines) class AsmGcRootFrameworkGcPolicy(BasicFrameworkGcPolicy): From noreply at buildbot.pypy.org Wed May 21 10:44:11 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 21 May 2014 10:44:11 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <20140521084411.392E61C011F@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5259:28457fcd20aa Date: 2014-05-21 10:43 +0200 http://bitbucket.org/pypy/extradoc/changeset/28457fcd20aa/ Log: tweaks 
diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -98,6 +98,8 @@ - Topaz: implementing Ruby + * most of the language implemented, "definitely faster than MRI" + * https://github.com/topazproject/topaz - HippyVM: implementing PHP @@ -120,7 +122,7 @@ - Lots of CFFI modules around: - * pygame_cffi, psycopg2_cffi, lxml (in-progress) + * pygame_cffi, psycopg2_cffi, lxml - numpy: in-progress, tons of code works out of the box @@ -134,6 +136,16 @@ :scale: 47% +Current status +--------------- + +- ARM + +- CFFI + +- numpy + +- py3k xxx From noreply at buildbot.pypy.org Wed May 21 12:39:46 2014 From: noreply at buildbot.pypy.org (Stefan Marr) Date: Wed, 21 May 2014 12:39:46 +0200 (CEST) Subject: [pypy-commit] pypy default: (cfbolz, smarr): optimize int_sub_ovf(x, x) to 0 Message-ID: <20140521103946.83DDC1C011F@cobra.cs.uni-duesseldorf.de> Author: Stefan Marr Branch: Changeset: r71628:919ab79173d3 Date: 2014-05-21 11:33 +0100 http://bitbucket.org/pypy/pypy/changeset/919ab79173d3/ Log: (cfbolz, smarr): optimize int_sub_ovf(x, x) to 0 diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -198,11 +198,11 @@ opnum = lastop.getopnum() args = lastop.getarglist() result = lastop.result - # If the INT_xxx_OVF was replaced with INT_xxx, then we can kill - # the GUARD_NO_OVERFLOW. - if (opnum == rop.INT_ADD or - opnum == rop.INT_SUB or - opnum == rop.INT_MUL): + # If the INT_xxx_OVF was replaced with INT_xxx or removed + # completely, then we can kill the GUARD_NO_OVERFLOW. 
+ if (opnum != rop.INT_ADD_OVF and + opnum != rop.INT_SUB_OVF and + opnum != rop.INT_MUL_OVF): return # Else, synthesize the non overflowing op for optimize_default to # reuse, as well as the reverse op @@ -248,6 +248,9 @@ def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) + if v1 is v2: + self.make_constant_int(op.result, 0) + return resbound = v1.intbound.sub_bound(v2.intbound) if resbound.bounded(): op = op.copy_and_change(rop.INT_SUB) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3339,6 +3339,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_int_sub_ovf_xx(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, i0) + guard_no_overflow() [] + escape(i1) + jump(i1) + """ + expected = """ + [] + escape(0) + jump() + """ + self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): ops = """ [i0] From noreply at buildbot.pypy.org Wed May 21 18:33:10 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 21 May 2014 18:33:10 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: horrible conversion to 3-state cards (cards get reset too often and it's generally messy) Message-ID: <20140521163310.C53431C350E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1225:4ccf9352f725 Date: 2014-05-21 18:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/4ccf9352f725/ Log: horrible conversion to 3-state cards (cards get reset too often and it's generally messy) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -43,7 +43,18 @@ static void _stm_mark_card(object_t *obj, uintptr_t card_index) { assert(card_index > 0); - dprintf(("mark %p card %lu\n", obj, card_index)); + + assert(obj->stm_flags & GCFLAG_HAS_CARDS); 
+ assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested */ +#ifndef NDEBUG + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + assert(size >= 32); + /* we need at least one lock in addition to the STM-reserved object write-lock */ +#endif + + dprintf(("mark %p card %lu with %d\n", obj, card_index, CARD_MARKED)); if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { /* not yet in the list */ @@ -60,19 +71,18 @@ We already own the object here or it is an overflow obj. */ uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj) + card_index; - assert(write_locks[card_lock_idx] == 0 - || write_locks[card_lock_idx] == STM_PSEGMENT->write_lock_num); + assert(write_locks[get_write_lock_idx((uintptr_t)obj)] == 0 /* overflow obj */ + || write_locks[get_write_lock_idx((uintptr_t)obj)] == STM_PSEGMENT->write_lock_num); assert(get_write_lock_idx((uintptr_t)obj) != card_lock_idx); - if (!write_locks[card_lock_idx]) - write_locks[card_lock_idx] = STM_PSEGMENT->write_lock_num; + if (write_locks[card_lock_idx] != CARD_MARKED) + write_locks[card_lock_idx] = CARD_MARKED; } static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t card_index) { /* is this an object from the same transaction, outside the nursery? 
*/ - if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) - == STM_PSEGMENT->overflow_number) { + if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); @@ -542,6 +552,7 @@ STM_PSEGMENT->marker_inev[1] = 0; /* reset these lists to NULL for the next transaction */ + _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); @@ -681,6 +692,10 @@ static void abort_data_structures_from_segment_num(int segment_num) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT /* This function clears the content of the given segment undergoing an abort. It is called from abort_with_mutex(), but also sometimes from other threads that figure out that this segment should abort. 
@@ -725,6 +740,8 @@ LIST_FREE(pseg->old_objects_with_cards); LIST_FREE(pseg->large_overflow_objects); list_clear(pseg->young_weakrefs); +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } static void abort_with_mutex(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -223,9 +223,19 @@ static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; +enum /* card values for write_locks */ { + CARD_CLEAR = 0, /* card not used at all */ + CARD_MARKED = 100, /* card marked for tracing in the next gc */ + CARD_MARKED_OLD = 101, /* card was marked before, but cleared + in a GC */ +}; + #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) +#define IS_OVERFLOW_OBJ(pseg, obj) ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \ + == pseg->overflow_number) + static inline uintptr_t get_card_index(uintptr_t byte_offset) { assert(_STM_CARD_SIZE == 32); return (byte_offset >> 5) + 1; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -440,6 +440,11 @@ static void clean_up_segment_lists(void) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); @@ -455,15 +460,28 @@ */ lst = pseg->objects_pointing_to_nursery; if (lst != NULL) { - LIST_FOREACH_R(lst, uintptr_t /*item*/, + LIST_FOREACH_R(lst, object_t* /*item*/, ({ struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); + REAL_ADDRESS(pseg->pub.segment_base, (uintptr_t)item); assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER)); OPT_ASSERT(!(realobj->stm_flags & GCFLAG_CARDS_SET)); realobj->stm_flags |= GCFLAG_WRITE_BARRIER; + /* XXX: this will be necessary when only synchronising cards */ + + if (realobj->stm_flags & GCFLAG_HAS_CARDS) { + /* We called a normal WB on these objs. 
If we wrote + a value to some place in them, we need to + synchronise the whole object on commit */ + if (IS_OVERFLOW_OBJ(pseg, realobj)) { + /* we do not need the old cards for overflow objects */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } else { + _reset_object_cards(pseg, item, CARD_MARKED_OLD, true); /* mark all */ + } + } })); list_clear(lst); @@ -473,19 +491,19 @@ struct object_s *realobj = (struct object_s *) REAL_ADDRESS(pseg->pub.segment_base, item); OPT_ASSERT(realobj->stm_flags & GCFLAG_CARDS_SET); - _reset_object_cards(&pseg->pub, item); + OPT_ASSERT(realobj->stm_flags & GCFLAG_WRITE_BARRIER); + + /* XXX: this will be necessary when only synchronising cards */ + uint8_t mark_value = IS_OVERFLOW_OBJ(pseg, realobj) ? + CARD_CLEAR : CARD_MARKED_OLD; + _reset_object_cards(pseg, item, mark_value, false); })); list_clear(lst); + } else { - LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, - { - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); - - if (realobj->stm_flags & GCFLAG_CARDS_SET) { - _reset_object_cards(&pseg->pub, item); - } - }); + /* if here MINOR_NOTHING_TO_DO() was true before, it's like + we "didn't do a collection" at all. So nothing to do on + modified_old_objs. 
*/ } /* Remove from 'large_overflow_objects' all objects that die */ @@ -500,6 +518,8 @@ } } } +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } static inline bool largemalloc_keep_object_at(char *data) @@ -513,6 +533,17 @@ _stm_largemalloc_sweep(); } +static void assert_cleared_locks(size_t n) +{ +#ifndef NDEBUG + size_t i; + uint8_t *s = write_locks; + for (i = 0; i < n; i++) + assert(s[i] == CARD_CLEAR || s[i] == CARD_MARKED + || s[i] == CARD_MARKED_OLD); +#endif +} + static void clean_write_locks(void) { /* the write_locks array, containing the visit marker during @@ -522,7 +553,7 @@ object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; - assert_memset_zero(write_locks, lock2_idx); + assert_cleared_locks(lock2_idx); memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,30 +183,96 @@ minor_trace_if_young(&tl->thread_local_obj); } -static void _reset_object_cards(struct stm_segment_info_s *seg, object_t *obj) +static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj) +{ +#ifndef NDEBUG + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(pseg->pub.segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + if (!(realobj->stm_flags & GCFLAG_HAS_CARDS)) + return; + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_card_index(size - 1); + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS_MAX + || write_locks[first_card_index] == 255); /* see gcpage.c */ + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + assert(write_locks[card_lock_idx] == CARD_CLEAR); + card_index++; + } + + assert(!(realobj->stm_flags & GCFLAG_CARDS_SET)); +#endif +} + +static void 
_verify_cards_cleared_in_all_lists(struct stm_priv_segment_info_s *pseg) +{ +#ifndef NDEBUG + LIST_FOREACH_R( + pseg->modified_old_objects, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + + if (pseg->large_overflow_objects) { + LIST_FOREACH_R( + pseg->large_overflow_objects, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + } + if (pseg->objects_pointing_to_nursery) { + LIST_FOREACH_R( + pseg->objects_pointing_to_nursery, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + } + if (pseg->old_objects_with_cards) { + LIST_FOREACH_R( + pseg->old_objects_with_cards, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + } +#endif +} + +static void _reset_object_cards(struct stm_priv_segment_info_s *pseg, + object_t *obj, uint8_t mark_value, + bool mark_all) { #pragma push_macro("STM_PSEGMENT") #pragma push_macro("STM_SEGMENT") #undef STM_PSEGMENT #undef STM_SEGMENT - struct object_s *realobj = (struct object_s *)REAL_ADDRESS(seg->segment_base, obj); + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(pseg->pub.segment_base, obj); size_t size = stmcb_size_rounded_up(realobj); + OPT_ASSERT(size >= 32); + assert(realobj->stm_flags & GCFLAG_HAS_CARDS); + assert(IMPLY(mark_value == CARD_CLEAR, !mark_all)); /* not necessary */ + assert(IMPLY(mark_all, mark_value == CARD_MARKED_OLD)); /* set *all* to OLD */ + assert(IMPLY(IS_OVERFLOW_OBJ(pseg, realobj), + mark_value == CARD_CLEAR)); /* overflows are always CLEARed */ + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; uintptr_t last_card_index = get_card_index(size - 1); + dprintf(("mark cards of %p, size %lu with %d\n", obj, size, mark_value)); + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS + || write_locks[first_card_index] == 255); /* see gcpage.c */ while (card_index <= last_card_index) { - #ifndef NDEBUG - if (write_locks[first_card_index + card_index]) - dprintf(("cleared card %lu on %p\n", card_index, obj)); 
- #endif - write_locks[first_card_index + card_index] = 0; + uintptr_t card_lock_idx = first_card_index + card_index; + + if (mark_all || write_locks[card_lock_idx] != CARD_CLEAR) { + /* dprintf(("mark card %lu,wl:%lu of %p with %d\n", */ + /* card_index, card_lock_idx, obj, mark_value)); */ + write_locks[card_lock_idx] = mark_value; + } card_index++; } + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS + || write_locks[first_card_index] == 255); /* see gcpage.c */ realobj->stm_flags &= ~GCFLAG_CARDS_SET; - dprintf(("reset cards on %p\n", obj)); #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } @@ -218,11 +284,10 @@ also gives the obj-base */ assert(_card_base_obj); uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)_card_base_obj); - uintptr_t card_lock_idx = base_lock_idx; - card_lock_idx += get_card_index( + uintptr_t card_lock_idx = base_lock_idx + get_card_index( (uintptr_t)((char*)pobj - STM_SEGMENT->segment_base) - (uintptr_t)_card_base_obj); - if (write_locks[card_lock_idx]) { + if (write_locks[card_lock_idx] == CARD_MARKED) { dprintf(("minor_trace_if_young_cards: trace %p\n", *pobj)); minor_trace_if_young(pobj); } @@ -234,13 +299,17 @@ _card_base_obj = obj; assert(!_is_in_nursery(obj)); assert(obj->stm_flags & GCFLAG_CARDS_SET); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); dprintf(("_trace_card_object(%p)\n", obj)); char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); - _reset_object_cards(get_segment(STM_SEGMENT->segment_num), obj); + bool obj_is_overflow = IS_OVERFLOW_OBJ(STM_PSEGMENT, obj); + uint8_t mark_value = obj_is_overflow ? 
CARD_CLEAR : CARD_MARKED_OLD; + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, mark_value, false); /* mark marked */ } @@ -259,8 +328,15 @@ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); obj->stm_flags |= GCFLAG_WRITE_BARRIER; - if (obj->stm_flags & GCFLAG_CARDS_SET) { - _reset_object_cards(get_segment(STM_SEGMENT->segment_num), obj); + if (obj->stm_flags & GCFLAG_HAS_CARDS) { + if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { + /* we do not need the old cards for overflow objects */ + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_CLEAR, false); + } else { + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_MARKED_OLD, true); /* mark all */ + } } } if (obj->stm_flags & GCFLAG_CARDS_SET) { _trace_card_object(obj); @@ -298,13 +374,18 @@ WRITE_BARRIER flag and traced into it to fix its content); or add the object to 'large_overflow_objects'. */ + struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num); if (STM_PSEGMENT->minor_collect_will_commit_now) { acquire_privatization_lock(); synchronize_object_now(obj); release_privatization_lock(); + if (obj->stm_flags & GCFLAG_HAS_CARDS) { + _reset_object_cards(pseg, obj, CARD_CLEAR, false); /* was young */ + } } else { LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } + _cards_cleared_in_object(pseg, obj); } /* the list could have moved while appending */ @@ -368,7 +449,15 @@ wlog_t *item; TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { - assert(!_is_in_nursery((object_t *)item->addr)); + object_t *obj = (object_t*)item->addr; + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item->addr); + + assert(!_is_in_nursery(obj)); + if (realobj->stm_flags & GCFLAG_HAS_CARDS) + _reset_object_cards(pseg, obj, CARD_CLEAR, false); + _cards_cleared_in_object(pseg, obj); + _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; @@ -377,21 +466,37 @@ 
tree_clear(pseg->nursery_objects_shadows); - if (pseg->old_objects_with_cards) { - LIST_FOREACH_R(pseg->old_objects_with_cards, object_t * /*item*/, - _reset_object_cards(&pseg->pub, item)); - } else { - LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, - { - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); - if (realobj->stm_flags & GCFLAG_CARDS_SET) { - _reset_object_cards(&pseg->pub, item); - } - }); + /* nearly all objs in old_objects_with_cards are also in modified_old_objects, + so we don't need to go through both lists: */ + LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_HAS_CARDS) { + /* clear all possibly used cards in this transaction */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + }); + /* overflow objects with cards are not in modified_old_objects */ + if (pseg->large_overflow_objects != NULL) { + /* some overflow objects may have cards, clear them too */ + LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + /* CARDS_SET is enough since other HAS_CARDS objs + are already cleared */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + }); } + _verify_cards_cleared_in_all_lists(pseg); + return nursery_used; #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -6,7 +6,9 @@ static uint32_t highest_overflow_number; -static void _reset_object_cards(struct stm_segment_info_s *seg, object_t *obj); +static void _reset_object_cards(struct stm_priv_segment_info_s *pseg, + object_t *obj, uint8_t mark_value, + bool mark_all); static void minor_collection(bool commit); 
static void check_nursery_at_transaction_start(void); static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -271,7 +271,9 @@ /* directly after allocation one can enable card marking for any kind of object with stm_use_cards(obj). This enables the use of stm_write/read_card() barriers that do more fine-grained - conflict detection and garbage collection. */ + conflict detection and garbage collection. + These objects need to be at least 32bytes in size! +*/ __attribute__((always_inline)) static inline void stm_use_cards(object_t* o) { diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -227,14 +227,19 @@ self.root_numbering = 0 self.ref_type_map = {} self.root_sizes = {} + self.with_cards = {} - def get_new_root_name(self, is_ref_type, size): + def get_new_root_name(self, is_ref_type, size, with_cards): self.root_numbering += 1 r = "lp_%s_%d" % ("ref" if is_ref_type else "char", self.root_numbering) self.ref_type_map[r] = is_ref_type self.root_sizes[r] = size + self.with_cards[r] = with_cards return r + def has_cards(self, r): + return self.with_cards[r] + def has_ref_type(self, r): return self.ref_type_map[r] @@ -363,10 +368,11 @@ #"SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) - r = global_state.get_new_root_name(False, size) + with_cards = int(size) >= 32 + r = global_state.get_new_root_name(False, size, with_cards) thread_state.push_roots(ex) - ex.do('%s = stm_allocate(%s)' % (r, size)) + ex.do('%s = stm_allocate(%s, %s)' % (r, size, bool(with_cards))) ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, 0, True) @@ -376,9 +382,10 @@ def op_allocate_ref(ex, global_state, thread_state): num = str(global_state.rnd.randrange(1, 100)) - r = global_state.get_new_root_name(True, num) + with_cards = int(num) >= 4 + r = global_state.get_new_root_name(True, 
num, with_cards) thread_state.push_roots(ex) - ex.do('%s = stm_allocate_refs(%s, True)' % (r, num)) + ex.do('%s = stm_allocate_refs(%s, %s)' % (r, num, bool(with_cards))) ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, "ffi.NULL", True) @@ -410,6 +417,7 @@ r = thread_state.get_random_root() trs = thread_state.transaction_state is_ref = global_state.has_ref_type(r) + has_cards = global_state.has_cards(r) # # check for possible write-write conflict: was_written = False @@ -438,13 +446,13 @@ thread_state.abort_transaction() offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, "True")) + ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, has_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v, "True")) + ex.do(raising_call(False, "stm_set_ref", r, "0", v, has_cards)) else: - ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) + ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset, has_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR")) + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", has_cards)) def op_read(ex, global_state, thread_state): r = thread_state.get_random_root() @@ -556,12 +564,12 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = global_state.get_new_root_name(False, "384") - ex.do('%s = stm_allocate_old(384)' % r) + r = global_state.get_new_root_name(False, "384", True) + ex.do('%s = stm_allocate_old(384, True)' % r) global_state.committed_transaction_state.add_root(r, 0, False) global_state.prebuilt_roots.append(r) - r = global_state.get_new_root_name(True, "50") + r = global_state.get_new_root_name(True, "50", True) ex.do('%s = stm_allocate_old_refs(50, True)' % r) global_state.committed_transaction_state.add_root(r, "ffi.NULL", False) global_state.prebuilt_roots.append(r) From noreply at 
buildbot.pypy.org Wed May 21 18:49:37 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 21 May 2014 18:49:37 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: hg merge default Message-ID: <20140521164937.421A91C0299@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71629:0ea1c56e5066 Date: 2014-05-21 04:17 +0100 http://bitbucket.org/pypy/pypy/changeset/0ea1c56e5066/ Log: hg merge default diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,63 +7,80 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make 
pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -72,35 +89,89 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." man: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man" + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." 
+ +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -18,11 +18,31 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
sys.path.append(os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', + 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -91,7 +111,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,7 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -1,19 +1,42 @@ Historical release notes ------------------------- +======================== + +Cpython 2.7 compatible versions +=============================== .. toctree:: + release-2.3.0.rst + release-2.2.1.rst + release-2.2.0.rst + release-2.1.0.rst + release-2.1.0-beta2.rst + release-2.1.0-beta1.rst + release-2.1.0.rst + release-2.0.2.rst + release-2.0.1.rst + release-2.0.0.rst + release-2.0.0-beta2.rst + release-2.0.0-beta1.rst + release-1.9.0.rst + release-1.8.0.rst + release-1.7.0.rst + release-1.6.0.rst + release-1.5.0.rst + release-1.4.1.rst + release-1.4.0beta.rst + release-1.4.0.rst + release-1.3.0.rst + release-1.2.0.rst + release-1.1.0.rst + release-1.0.0.rst + release-0.99.0.rst + release-0.9.0.rst + release-0.8.0.rst + release-0.7.0.rst release-0.6 - release-0.7.0.rst - release-0.8.0.rst - release-0.9.0.rst - release-0.99.0.rst - release-1.0.0.rst - release-1.1.0.rst - release-1.2.0.rst - release-1.3.0.rst - release-1.4.0.rst - release-1.4.0beta.rst - release-1.4.1.rst - release-1.5.0.rst - release-1.6.0.rst + +Cpython 3.2 compatible versions +=============================== +.. 
toctree:: + release-pypy3-2.1.0-beta1.rst diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat --- a/pypy/doc/make.bat +++ b/pypy/doc/make.bat @@ -2,11 +2,15 @@ REM Command file for Sphinx documentation -set SPHINXBUILD=sphinx-build +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help @@ -14,16 +18,25 @@ if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. 
xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled goto end ) @@ -33,8 +46,34 @@ goto end ) + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 2> nul +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + + if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end @@ -42,13 +81,23 @@ if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end @@ -56,6 +105,7 @@ if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. 
goto end @@ -63,6 +113,7 @@ if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. @@ -71,6 +122,7 @@ if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: @@ -80,15 +132,85 @@ goto end ) +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %BUILDDIR%/.. + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. 
+ goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end @@ -96,6 +218,7 @@ if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. @@ -104,10 +227,27 @@ if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. + goto end +) + :end diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.3.1.rst @@ -0,0 +1,13 @@ +======================= +What's new since PyPy 2.3? +======================= + +.. this is a revision shortly after release-2.3 +.. 
startrev: 394146e9bb67 + +Move builtin ``struct`` module to ``_struct`` to allow ``pypy "-m idlelib.idle"`` + +Support compilation with gcc-4.9 + +Fixes for issues #1769, #1764, #1762, #1752 + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,6 +1,13 @@ ======================= -What's new in PyPy 2.3+ +What's new in PyPy 2.4+ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: f556d32f8319 +.. startrev: b2cc67adbaad + +Added support for the stdlib gdbm module via cffi + +Annotator cleanups + +.. branch: release-2.3.x + diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -85,7 +85,7 @@ if softspace: stdout.write('\n') - except SystemExit, e: + except SystemExit as e: handle_sys_exit(e) except: display_exception() @@ -590,6 +590,11 @@ # handle the case where no command/filename/module is specified # on the command-line. + try: + from _ast import PyCF_ACCEPT_NULL_BYTES + except ImportError: + PyCF_ACCEPT_NULL_BYTES = 0 + # update sys.path *after* loading site.py, in case there is a # "site.py" file in the script's directory. 
Only run this if we're # executing the interactive prompt, if we're running a script we @@ -603,17 +608,17 @@ python_startup = readenv and os.getenv('PYTHONSTARTUP') if python_startup: try: - f = open(python_startup) - startup = f.read() - f.close() - except IOError, e: + with open(python_startup) as f: + startup = f.read() + except IOError as e: print >> sys.stderr, "Could not open PYTHONSTARTUP" print >> sys.stderr, "IOError:", e else: def run_it(): co_python_startup = compile(startup, python_startup, - 'exec') + 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_python_startup in mainmodule.__dict__ mainmodule.__file__ = python_startup run_toplevel(run_it) @@ -626,7 +631,8 @@ else: # If not interactive, just read and execute stdin normally. def run_it(): - co_stdin = compile(sys.stdin.read(), '', 'exec') + co_stdin = compile(sys.stdin.read(), '', 'exec', + PyCF_ACCEPT_NULL_BYTES) exec co_stdin in mainmodule.__dict__ mainmodule.__file__ = '' success = run_toplevel(run_it) @@ -660,7 +666,7 @@ args = (execfile, filename, mainmodule.__dict__) success = run_toplevel(*args) - except SystemExit, e: + except SystemExit as e: status = e.code if inspect_requested(): display_exception() @@ -676,7 +682,7 @@ readenv and os.getenv('PYPY_IRC_TOPIC')) success = run_toplevel(interactive_console, mainmodule, quiet=not irc_topic) - except SystemExit, e: + except SystemExit as e: status = e.code else: status = not success @@ -726,10 +732,10 @@ setup_bootstrap_path(executable) try: cmdline = parse_command_line(argv) - except CommandLineError, e: + except CommandLineError as e: print_error(str(e)) return 2 - except SystemExit, e: + except SystemExit as e: return e.code or 0 setup_and_fix_paths(**cmdline) return run_command_line(**cmdline) diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -22,3 +22,4 @@ PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 
0x0200 PyCF_ONLY_AST = 0x0400 +PyCF_ACCEPT_NULL_BYTES = 0x10000000 # PyPy only, for compile() diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -33,7 +33,7 @@ interpleveldefs = { # constants - '__debug__' : '(space.w_True)', # XXX + '__debug__' : '(space.w_True)', 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -24,7 +24,8 @@ """ ec = space.getexecutioncontext() if flags & ~(ec.compiler.compiler_flags | consts.PyCF_ONLY_AST | - consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8): + consts.PyCF_DONT_IMPLY_DEDENT | consts.PyCF_SOURCE_IS_UTF8 | + consts.PyCF_ACCEPT_NULL_BYTES): raise OperationError(space.w_ValueError, space.wrap("compile() unrecognized flags")) @@ -53,9 +54,10 @@ else: source = space.readbuf_w(w_source).as_str() - if '\x00' in source: - raise OperationError(space.w_TypeError, space.wrap( - "compile() expected string without null bytes")) + if not (flags & consts.PyCF_ACCEPT_NULL_BYTES): + if '\x00' in source: + raise OperationError(space.w_TypeError, space.wrap( + "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: code = ec.compiler.compile_to_ast(source, filename, mode, flags) diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -610,6 +610,16 @@ firstlineno = co.co_firstlineno assert firstlineno == 2 + def test_compile_null_bytes(self): + import _ast + raises(TypeError, compile, '\x00', 'mymod', 'exec', 0) + raises(SyntaxError, compile, '\x00', 'mymod', 'exec', + _ast.PyCF_ACCEPT_NULL_BYTES) + src = "#abc\x00def\n" + 
raises(TypeError, compile, src, 'mymod', 'exec') + raises(TypeError, compile, src, 'mymod', 'exec', 0) + compile(src, 'mymod', 'exec', _ast.PyCF_ACCEPT_NULL_BYTES) # works + def test_print_function(self): import __builtin__ import sys diff --git a/pypy/module/_ast/__init__.py b/pypy/module/_ast/__init__.py --- a/pypy/module/_ast/__init__.py +++ b/pypy/module/_ast/__init__.py @@ -6,6 +6,8 @@ interpleveldefs = { "PyCF_ONLY_AST" : "space.wrap(%s)" % consts.PyCF_ONLY_AST, + "PyCF_ACCEPT_NULL_BYTES": + "space.wrap(%s)" % consts.PyCF_ACCEPT_NULL_BYTES, "__version__" : "space.wrap('82160')", # from CPython's svn. } appleveldefs = {} diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -209,11 +209,13 @@ while size > 0: # "peeks" on the underlying stream to see how many chars # we can safely read without reading past an end-of-line - peeked = stream.peek() - pn = peeked.find("\n", 0, size) + startindex, peeked = stream.peek() + assert 0 <= startindex <= len(peeked) + endindex = startindex + size + pn = peeked.find("\n", startindex, endindex) if pn < 0: - pn = min(size-1, len(peeked)) - c = stream.read(pn + 1) + pn = min(endindex - 1, len(peeked)) + c = stream.read(pn - startindex + 1) if not c: break result.append(c) diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -458,9 +458,7 @@ return result def peek(self): - pos = self.pos - assert pos >= 0 - return self.buffer[pos:] + return (self.pos, self.buffer) def try_to_find_file_descriptor(self): return self.stream.try_to_find_file_descriptor() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -10,6 +10,7 @@ c_last_exception, checkgraph) from rpython.translator import simplify, transform from rpython.annotator 
import model as annmodel, signature +from rpython.annotator.argument import simple_args from rpython.annotator.value import AnnotatedValue from rpython.annotator.bookkeeper import Bookkeeper @@ -92,7 +93,7 @@ def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) - args = self.bookkeeper.build_args("simple_call", args_s[:]) + args = simple_args(args_s) result = [] def schedule(graph, inputcells): result.append((graph, inputcells)) diff --git a/rpython/annotator/argument.py b/rpython/annotator/argument.py --- a/rpython/annotator/argument.py +++ b/rpython/annotator/argument.py @@ -171,6 +171,12 @@ def rawshape(args): return args._rawshape() +def simple_args(args_s): + return ArgumentsForTranslation(list(args_s)) + +def complex_args(args_s): + return ArgumentsForTranslation.fromshape(args_s[0].const, + list(args_s[1:])) # # ArgErr family of exceptions raised in case of argument mismatch. diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -8,7 +8,7 @@ SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, - SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, + SomeBuiltinMethod, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, @@ -697,15 +697,14 @@ return SomeIterator(s_cont, *iter1.variant) -class __extend__(pairtype(SomeBuiltin, SomeBuiltin)): - +class __extend__(pairtype(SomeBuiltinMethod, SomeBuiltinMethod)): def union((bltn1, bltn2)): if (bltn1.analyser != bltn2.analyser or - bltn1.methodname != bltn2.methodname or - bltn1.s_self is None or bltn2.s_self is None): + bltn1.methodname != bltn2.methodname): raise 
UnionError(bltn1, bltn2) s_self = unionof(bltn1.s_self, bltn2.s_self) - return SomeBuiltin(bltn1.analyser, s_self, methodname=bltn1.methodname) + return SomeBuiltinMethod(bltn1.analyser, s_self, + methodname=bltn1.methodname) @op.is_.register(SomePBC, SomePBC) def is__PBC_PBC(pbc1, pbc2): diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -18,7 +18,7 @@ from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import ArgumentsForTranslation +from rpython.annotator.argument import simple_args, complex_args from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -538,7 +538,7 @@ del emulated_pbc_calls[other_key] emulated_pbc_calls[unique_key] = pbc, args_s - args = self.build_args("simple_call", args_s) + args = simple_args(args_s) if callback is None: emulated = True else: @@ -564,11 +564,9 @@ def build_args(self, op, args_s): if op == "simple_call": - return ArgumentsForTranslation(list(args_s)) + return simple_args(args_s) elif op == "call_args": - return ArgumentsForTranslation.fromshape( - args_s[0].const, # shape - list(args_s[1:])) + return complex_args(args_s) def ondegenerated(self, what, s_value, where=None, called_from_graph=None): self.annotator.ondegenerated(what, s_value, where=where, diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -14,6 +14,7 @@ from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel +from rpython.annotator.model import AnnotatorError def constpropagate(func, args_s, s_result): @@ -211,7 +212,7 @@ def builtin_tuple(s_iterable): if isinstance(s_iterable, SomeTuple): return 
s_iterable - return AnnotatorError("tuple(): argument must be another tuple") + raise AnnotatorError("tuple(): argument must be another tuple") def builtin_list(s_iterable): if isinstance(s_iterable, SomeList): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -536,7 +536,15 @@ class SomeBuiltinMethod(SomeBuiltin): """ Stands for a built-in method which has got special meaning """ - knowntype = MethodType + def __init__(self, analyser, s_self, methodname): + if isinstance(analyser, MethodType): + analyser = descriptor.InstanceMethod( + analyser.im_func, + analyser.im_self, + analyser.im_class) + self.analyser = analyser + self.s_self = s_self + self.methodname = methodname class SomeImpossibleValue(SomeObject): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -7,14 +7,15 @@ from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, - SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeType, s_ImpossibleValue, + SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, + SomeFloat, SomeIterator, SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? 
from rpython.annotator.model import AnnotatorError +from rpython.annotator.argument import simple_args, complex_args UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) @@ -106,7 +107,7 @@ except AttributeError: return None else: - return SomeBuiltin(analyser, self, name) + return SomeBuiltinMethod(analyser, self, name) def getattr(self, s_attr): # get a SomeBuiltin if the SomeObject has @@ -131,10 +132,10 @@ return self # default unbound __get__ implementation def simple_call(self, *args_s): - return self.call(getbookkeeper().build_args("simple_call", args_s)) + return self.call(simple_args(args_s)) def call_args(self, *args_s): - return self.call(getbookkeeper().build_args("call_args", args_s)) + return self.call(complex_args(args_s)) def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") @@ -684,31 +685,38 @@ bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_iterable, []) - return s_iterable.call(bk.build_args("simple_call", [])) + return s_iterable.call(simple_args([])) def next(self): s_next = self._true_getattr('next') bk = getbookkeeper() # record for calltables bk.emulate_pbc_call(bk.position_key, s_next, []) - return s_next.call(bk.build_args("simple_call", [])) + return s_next.call(simple_args([])) class __extend__(SomeBuiltin): + def simple_call(self, *args): + return self.analyser(*args) + + def call(self, args, implicit_init=False): + args_s, kwds = args.unpack() + # prefix keyword arguments with 's_' + kwds_s = {} + for key, s_value in kwds.items(): + kwds_s['s_'+key] = s_value + return self.analyser(*args_s, **kwds_s) + + +class __extend__(SomeBuiltinMethod): def _can_only_throw(self, *args): analyser_func = getattr(self.analyser, 'im_func', None) can_only_throw = getattr(analyser_func, 'can_only_throw', None) if can_only_throw is None or isinstance(can_only_throw, list): return can_only_throw - if self.s_self is not None: - 
return can_only_throw(self.s_self, *args) - else: - return can_only_throw(*args) + return can_only_throw(self.s_self, *args) def simple_call(self, *args): - if self.s_self is not None: - return self.analyser(self.s_self, *args) - else: - return self.analyser(*args) + return self.analyser(self.s_self, *args) simple_call.can_only_throw = _can_only_throw def call(self, args, implicit_init=False): @@ -717,10 +725,7 @@ kwds_s = {} for key, s_value in kwds.items(): kwds_s['s_'+key] = s_value - if self.s_self is not None: - return self.analyser(self.s_self, *args_s, **kwds_s) - else: - return self.analyser(*args_s, **kwds_s) + return self.analyser(self.s_self, *args_s, **kwds_s) class __extend__(SomePBC): diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1861,20 +1861,26 @@ #END MARKING elif self.gc_state == STATE_SWEEPING: # - # Walk all rawmalloced objects and free the ones that don't - # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. - limit = self.nursery_size // self.ac.page_size - remaining = self.free_unvisited_rawmalloc_objects_step(limit) - # - # Ask the ArenaCollection to visit a fraction of the objects. - # Free the ones that have not been visited above, and reset - # GCFLAG_VISITED on the others. Visit at most '3 * limit' - # pages minus the number of objects already visited above. - done = self.ac.mass_free_incremental(self._free_if_unvisited, - 2 * limit + remaining) + if self.raw_malloc_might_sweep.non_empty(): + # Walk all rawmalloced objects and free the ones that don't + # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. + # This limit is conservatively high enough to guarantee that + # a total object size of at least '3 * nursery_size' bytes + # is processed. 
+ limit = 3 * self.nursery_size // self.small_request_threshold + self.free_unvisited_rawmalloc_objects_step(limit) + done = False # the 2nd half below must still be done + else: + # Ask the ArenaCollection to visit a fraction of the objects. + # Free the ones that have not been visited above, and reset + # GCFLAG_VISITED on the others. Visit at most '3 * + # nursery_size' bytes. + limit = 3 * self.nursery_size // self.ac.page_size + done = self.ac.mass_free_incremental(self._free_if_unvisited, + limit) # XXX tweak the limits above # - if remaining > 0 and done: + if done: self.num_major_collects += 1 # # We also need to reset the GCFLAG_VISITED on prebuilt GC objects. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -177,7 +177,7 @@ if intval < 0: sign = -1 - ival = r_uint(-intval) + ival = -r_uint(intval) elif intval > 0: sign = 1 ival = r_uint(intval) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -234,11 +234,12 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.peek() - pn = peeked.find("\n") + startindex, peeked = self.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) if pn < 0: pn = len(peeked) - c = self.read(pn + 1) + c = self.read(pn - startindex + 1) if not c: break result.append(c) @@ -265,7 +266,7 @@ pass def peek(self): - return '' + return (0, '') def try_to_find_file_descriptor(self): return -1 @@ -553,7 +554,7 @@ else: difpos = offset if -self.pos <= difpos <= currentsize: - self.pos += difpos + self.pos += intmask(difpos) return if whence == 1: offset -= currentsize @@ -705,9 +706,7 @@ return "".join(chunks) def peek(self): - pos = self.pos - assert pos >= 0 - return self.buf[pos:] + return (self.pos, self.buf) write = PassThrough("write", flush_buffers=True) 
truncate = PassThrough("truncate", flush_buffers=True) @@ -970,12 +969,13 @@ while True: # "peeks" on the underlying stream to see how many characters # we can safely read without reading past an end-of-line - peeked = self.base.peek() - pn = peeked.find("\n") - pr = peeked.find("\r") + startindex, peeked = self.base.peek() + assert 0 <= startindex <= len(peeked) + pn = peeked.find("\n", startindex) + pr = peeked.find("\r", startindex) if pn < 0: pn = len(peeked) if pr < 0: pr = len(peeked) - c = self.read(min(pn, pr) + 1) + c = self.read(min(pn, pr) - startindex + 1) if not c: break result.append(c) @@ -1028,7 +1028,7 @@ self.buf = "" def peek(self): - return self.buf + return (0, self.buf) write = PassThrough("write", flush_buffers=True) truncate = PassThrough("truncate", flush_buffers=True) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -12,6 +12,7 @@ _store_digit, _mask_digit, InvalidEndiannessError, InvalidSignednessError) from rpython.rlib.rfloat import NAN from rpython.rtyper.test.test_llinterp import interpret +from rpython.translator.c.test.test_standalone import StandaloneTests class TestRLong(object): @@ -849,3 +850,17 @@ py.test.raises(InvalidSignednessError, i.tobytes, 3, 'little', signed=False) py.test.raises(OverflowError, i.tobytes, 2, 'little', signed=True) + +class TestTranslated(StandaloneTests): + + def test_gcc_4_9(self): + MIN = -sys.maxint-1 + + def entry_point(argv): + print rbigint.fromint(MIN+1)._digits + print rbigint.fromint(MIN)._digits + return 0 + + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data == '[%d]\n[0, 1]\n' % sys.maxint diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -834,6 +834,7 @@ def ll_stringslice_startonly(s1, start): return 
LLHelpers._ll_stringslice(s1, start, len(s1.chars)) + @signature(types.any(), types.int(), types.int(), returns=types.any()) def ll_stringslice_startstop(s1, start, stop): if jit.we_are_jitted(): if stop > len(s1.chars): diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -11,37 +11,32 @@ class __extend__(annmodel.SomeBuiltin): def rtyper_makerepr(self, rtyper): - if self.s_self is None: - # built-in function case - if not self.is_constant(): - raise TyperError("non-constant built-in function!") - return BuiltinFunctionRepr(self.const) - else: - # built-in method case - assert self.methodname is not None - result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname) - return result + if not self.is_constant(): + raise TyperError("non-constant built-in function!") + return BuiltinFunctionRepr(self.const) + def rtyper_makekey(self): - if self.s_self is None: - # built-in function case + const = getattr(self, 'const', None) + if extregistry.is_registered(const): + const = extregistry.lookup(const) + return self.__class__, const - const = getattr(self, 'const', None) +class __extend__(annmodel.SomeBuiltinMethod): + def rtyper_makerepr(self, rtyper): + assert self.methodname is not None + result = BuiltinMethodRepr(rtyper, self.s_self, self.methodname) + return result - if extregistry.is_registered(const): - const = extregistry.lookup(const) - - return self.__class__, const - else: - # built-in method case - # NOTE: we hash by id of self.s_self here. This appears to be - # necessary because it ends up in hop.args_s[0] in the method call, - # and there is no telling what information the called - # rtype_method_xxx() will read from that hop.args_s[0]. - # See test_method_join in test_rbuiltin. - # There is no problem with self.s_self being garbage-collected and - # its id reused, because the BuiltinMethodRepr keeps a reference - # to it. 
- return (self.__class__, self.methodname, id(self.s_self)) + def rtyper_makekey(self): + # NOTE: we hash by id of self.s_self here. This appears to be + # necessary because it ends up in hop.args_s[0] in the method call, + # and there is no telling what information the called + # rtype_method_xxx() will read from that hop.args_s[0]. + # See test_method_join in test_rbuiltin. + # There is no problem with self.s_self being garbage-collected and + # its id reused, because the BuiltinMethodRepr keeps a reference + # to it. + return (self.__class__, self.methodname, id(self.s_self)) def call_args_expand(hop, takes_kwds = True): hop = hop.copy() diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -2,6 +2,7 @@ from rpython.annotator import model as annmodel, description from rpython.flowspace.model import Constant +from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError @@ -290,7 +291,7 @@ bk = self.rtyper.annotator.bookkeeper descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site - args = bk.build_args("simple_call", args_s) + args = simple_args(args_s) shape, index = vfcs(bk, self.callfamily, descs, args, op) funcdesc, = descs row_of_one_graph = self.callfamily.calltables[shape][index] From noreply at buildbot.pypy.org Wed May 21 18:49:38 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 21 May 2014 18:49:38 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: register annotators for op.simple_call and op.call_args Message-ID: <20140521164938.7E71F1C0299@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71630:e8f43ee8a19e Date: 2014-05-21 04:53 +0100 http://bitbucket.org/pypy/pypy/changeset/e8f43ee8a19e/ Log: register annotators for op.simple_call and op.call_args diff --git 
a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -44,6 +44,14 @@ return s_Bool contains_SomeObject.can_only_throw = [] + at op.simple_call.register(SomeObject) +def simple_call_SomeObject(func, *args): + return func.ann.call(simple_args([arg.ann for arg in args])) + + at op.call_args.register(SomeObject) +def call_args(func, *args): + return func.ann.call(complex_args([arg.ann for arg in args])) + class __extend__(SomeObject): def issubtype(self, s_cls): @@ -131,12 +139,6 @@ def bind_callables_under(self, classdef, name): return self # default unbound __get__ implementation - def simple_call(self, *args_s): - return self.call(simple_args(args_s)) - - def call_args(self, *args_s): - return self.call(complex_args(args_s)) - def call(self, args, implicit_init=False): raise AnnotatorError("Cannot prove that the object is callable") @@ -695,9 +697,6 @@ return s_next.call(simple_args([])) class __extend__(SomeBuiltin): - def simple_call(self, *args): - return self.analyser(*args) - def call(self, args, implicit_init=False): args_s, kwds = args.unpack() # prefix keyword arguments with 's_' From noreply at buildbot.pypy.org Wed May 21 19:51:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 21 May 2014 19:51:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: ignore LocaleErrors Message-ID: <20140521175109.30F781C02F3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71631:a0805424875b Date: 2014-05-21 10:50 -0700 http://bitbucket.org/pypy/pypy/changeset/a0805424875b/ Log: ignore LocaleErrors diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -56,7 +56,10 @@ try: space.call_function(w_run_toplevel, w_call_startup_gateway) if rlocale.HAVE_LANGINFO: - rlocale.setlocale(rlocale.LC_ALL, '') + try: + rlocale.setlocale(rlocale.LC_ALL, '') + 
except rlocale.LocaleError: + pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) w_argv = space.newlist([space.fsdecode(space.wrapbytes(s)) for s in argv[1:]]) From noreply at buildbot.pypy.org Wed May 21 22:03:37 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:03:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix 'str(buffer(array.array('i')))' if running untranslated. Message-ID: <20140521200337.9FB951C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r71633:212ca275373d Date: 2014-05-21 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/212ca275373d/ Log: Fix 'str(buffer(array.array('i')))' if running untranslated. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -2,7 +2,7 @@ from rpython.rlib import jit from rpython.rlib.buffer import Buffer -from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import llstr @@ -616,6 +616,14 @@ if step == 1: data = self.array._charbuf_start() try: + if not we_are_translated(): + # rffi.ptradd(NULL, ...) doesn't work untranslated. 
+ # It returns nonsense translated, but its return value is + # unused if size == 0, which is the case if data == NULL + if self.array._buffer_as_unsigned() == 0: + assert size == 0 + return '' + return rffi.charpsize2str(rffi.ptradd(data, start), size) finally: self.array._charbuf_stop() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1029,6 +1029,9 @@ raises(TypeError, "a[MyInt(0)]") raises(TypeError, "a[MyInt(0):MyInt(5)]") + def test_fresh_array_buffer_str(self): + assert str(buffer(self.array('i'))) == '' + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() From noreply at buildbot.pypy.org Wed May 21 22:03:38 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:03:38 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix 'str(buffer(array.array('i')))' if running untranslated. Message-ID: <20140521200338.D689B1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71634:61cb7d5b5ce5 Date: 2014-05-21 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/61cb7d5b5ce5/ Log: Fix 'str(buffer(array.array('i')))' if running untranslated. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -2,7 +2,7 @@ from rpython.rlib import jit from rpython.rlib.buffer import Buffer -from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import llstr @@ -656,6 +656,14 @@ if step == 1: data = self.array._charbuf_start() try: + if not we_are_translated(): + # rffi.ptradd(NULL, ...) doesn't work untranslated. 
+ # It returns nonsense translated, but its return value is + # unused if size == 0, which is the case if data == NULL + if self.array._buffer_as_unsigned() == 0: + assert size == 0 + return '' + return rffi.charpsize2str(rffi.ptradd(data, start), size) finally: self.array._charbuf_stop() diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1044,6 +1044,9 @@ raises(TypeError, "a[MyInt(0)]") raises(TypeError, "a[MyInt(0):MyInt(5)]") + def test_fresh_array_buffer_str(self): + assert str(buffer(self.array('i'))) == '' + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() From noreply at buildbot.pypy.org Wed May 21 22:03:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:03:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: 2to3 Message-ID: <20140521200340.196BE1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71635:2528ab74488e Date: 2014-05-21 22:02 +0200 http://bitbucket.org/pypy/pypy/changeset/2528ab74488e/ Log: 2to3 diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1045,7 +1045,7 @@ raises(TypeError, "a[MyInt(0):MyInt(5)]") def test_fresh_array_buffer_str(self): - assert str(buffer(self.array('i'))) == '' + assert memoryview(self.array('i')).tobytes() == b'' class AppTestArrayBuiltinShortcut(AppTestArray): From noreply at buildbot.pypy.org Wed May 21 22:14:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 21 May 2014 22:14:15 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid the call to charpsize2str if size == 0 instead Message-ID: <20140521201415.0C5CF1C011F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71636:9f4587150929 Date: 2014-05-21 
16:12 -0400 http://bitbucket.org/pypy/pypy/changeset/9f4587150929/ Log: avoid the call to charpsize2str if size == 0 instead diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -2,7 +2,7 @@ from rpython.rlib import jit from rpython.rlib.buffer import Buffer -from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import llstr @@ -613,17 +613,11 @@ array._charbuf_stop() def getslice(self, start, stop, step, size): + if size == 0: + return '' if step == 1: data = self.array._charbuf_start() try: - if not we_are_translated(): - # rffi.ptradd(NULL, ...) doesn't work untranslated. - # It returns nonsense translated, but its return value is - # unused if size == 0, which is the case if data == NULL - if self.array._buffer_as_unsigned() == 0: - assert size == 0 - return '' - return rffi.charpsize2str(rffi.ptradd(data, start), size) finally: self.array._charbuf_stop() From noreply at buildbot.pypy.org Wed May 21 22:42:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:34 +0200 (CEST) Subject: [pypy-commit] pypy cli-jit: Close obsolete branch (ootype was removed). Message-ID: <20140521204234.73AE81C350E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: cli-jit Changeset: r71637:bb6ebd3f9652 Date: 2014-05-21 22:26 +0200 http://bitbucket.org/pypy/pypy/changeset/bb6ebd3f9652/ Log: Close obsolete branch (ootype was removed). From noreply at buildbot.pypy.org Wed May 21 22:42:35 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:35 +0200 (CEST) Subject: [pypy-commit] pypy avm: Close abandoned branch. 
Message-ID: <20140521204235.9A1AC1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: avm Changeset: r71638:21ce47c5289a Date: 2014-05-21 22:26 +0200 http://bitbucket.org/pypy/pypy/changeset/21ce47c5289a/ Log: Close abandoned branch. From noreply at buildbot.pypy.org Wed May 21 22:42:36 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:36 +0200 (CEST) Subject: [pypy-commit] pypy kill-single-impl-multimethods: Close superseded branch. Message-ID: <20140521204236.C675E1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-single-impl-multimethods Changeset: r71639:1ee0f2429922 Date: 2014-05-21 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/1ee0f2429922/ Log: Close superseded branch. From noreply at buildbot.pypy.org Wed May 21 22:42:37 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:37 +0200 (CEST) Subject: [pypy-commit] pypy kill-more-multimethods: Close superseded branch. Message-ID: <20140521204237.F0A7B1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-more-multimethods Changeset: r71640:efd770405d58 Date: 2014-05-21 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/efd770405d58/ Log: Close superseded branch. From noreply at buildbot.pypy.org Wed May 21 22:42:39 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:39 +0200 (CEST) Subject: [pypy-commit] pypy json-decoder-speedups: Close superseded branch. Message-ID: <20140521204239.281C91C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: json-decoder-speedups Changeset: r71641:65809b9cfa70 Date: 2014-05-21 22:29 +0200 http://bitbucket.org/pypy/pypy/changeset/65809b9cfa70/ Log: Close superseded branch. From noreply at buildbot.pypy.org Wed May 21 22:42:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:40 +0200 (CEST) Subject: [pypy-commit] pypy bytearray-refactor: Close superseded branch. 
Message-ID: <20140521204240.52E381C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: bytearray-refactor Changeset: r71642:e4555228a4eb Date: 2014-05-21 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/e4555228a4eb/ Log: Close superseded branch. From noreply at buildbot.pypy.org Wed May 21 22:42:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:41 +0200 (CEST) Subject: [pypy-commit] pypy embedded-pypy: Close superseded branch (see pypy/doc/embedding.rst). Message-ID: <20140521204241.687E91C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: embedded-pypy Changeset: r71643:07f342e0a4d4 Date: 2014-05-21 22:32 +0200 http://bitbucket.org/pypy/pypy/changeset/07f342e0a4d4/ Log: Close superseded branch (see pypy/doc/embedding.rst). From noreply at buildbot.pypy.org Wed May 21 22:42:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:42 +0200 (CEST) Subject: [pypy-commit] pypy jvm-improvements: Close obsolete branch (ootype was removed). Message-ID: <20140521204242.DA0E01C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: jvm-improvements Changeset: r71644:2cb3ff435de1 Date: 2014-05-21 22:33 +0200 http://bitbucket.org/pypy/pypy/changeset/2cb3ff435de1/ Log: Close obsolete branch (ootype was removed). 
From noreply at buildbot.pypy.org Wed May 21 22:42:43 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:43 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head bb6ebd3f9652 on branch cli-jit Message-ID: <20140521204243.EFAF61C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71645:c9cb1334cc78 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/c9cb1334cc78/ Log: Merge closed head bb6ebd3f9652 on branch cli-jit From noreply at buildbot.pypy.org Wed May 21 22:42:45 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:45 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 21ce47c5289a on branch avm Message-ID: <20140521204245.0C75F1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71646:2158af533784 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/2158af533784/ Log: Merge closed head 21ce47c5289a on branch avm From noreply at buildbot.pypy.org Wed May 21 22:42:46 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:46 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1ee0f2429922 on branch kill-single-impl-multimethods Message-ID: <20140521204246.131331C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71647:40bb14b60014 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/40bb14b60014/ Log: Merge closed head 1ee0f2429922 on branch kill-single-impl- multimethods From noreply at buildbot.pypy.org Wed May 21 22:42:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:47 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head efd770405d58 on branch kill-more-multimethods Message-ID: <20140521204247.16F4F1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: 
closed-branches Changeset: r71648:4e9e72d58055 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/4e9e72d58055/ Log: Merge closed head efd770405d58 on branch kill-more-multimethods From noreply at buildbot.pypy.org Wed May 21 22:42:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:48 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 65809b9cfa70 on branch json-decoder-speedups Message-ID: <20140521204248.18F211C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71649:f3a3296b9a9c Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/f3a3296b9a9c/ Log: Merge closed head 65809b9cfa70 on branch json-decoder-speedups From noreply at buildbot.pypy.org Wed May 21 22:42:49 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:49 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e4555228a4eb on branch bytearray-refactor Message-ID: <20140521204249.1BA421C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71650:810cff5c0af2 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/810cff5c0af2/ Log: Merge closed head e4555228a4eb on branch bytearray-refactor From noreply at buildbot.pypy.org Wed May 21 22:42:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:50 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 07f342e0a4d4 on branch embedded-pypy Message-ID: <20140521204250.1E26A1C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71651:150dc75c1a76 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/150dc75c1a76/ Log: Merge closed head 07f342e0a4d4 on branch embedded-pypy From noreply at buildbot.pypy.org Wed May 21 22:42:51 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:51 
+0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 2cb3ff435de1 on branch jvm-improvements Message-ID: <20140521204251.203961C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71652:31ba9bd98266 Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/31ba9bd98266/ Log: Merge closed head 2cb3ff435de1 on branch jvm-improvements From noreply at buildbot.pypy.org Wed May 21 22:42:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 21 May 2014 22:42:52 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20140521204252.230B41C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r71653:749435f838fa Date: 2014-05-21 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/749435f838fa/ Log: re-close this branch From noreply at buildbot.pypy.org Wed May 21 23:29:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 21 May 2014 23:29:21 +0200 (CEST) Subject: [pypy-commit] pypy default: four space indent Message-ID: <20140521212921.7BD121C0299@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71654:75d6b6f37ddd Date: 2014-05-21 14:28 -0700 http://bitbucket.org/pypy/pypy/changeset/75d6b6f37ddd/ Log: four space indent diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -25,13 +25,13 @@ on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally - try: - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - except ImportError: - print('sphinx_rtd_theme is not installed') - html_theme = 'default' + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + 
html_theme = 'default' # otherwise, readthedocs.org uses their theme by default, so no need to specify it From noreply at buildbot.pypy.org Wed May 21 23:31:59 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 May 2014 23:31:59 +0200 (CEST) Subject: [pypy-commit] pypy packaging: start to modernize packaging Message-ID: <20140521213159.ABDB41C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71655:595408acdcbd Date: 2014-05-21 22:12 +0300 http://bitbucket.org/pypy/pypy/changeset/595408acdcbd/ Log: start to modernize packaging diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,20 +3,21 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py --base-dir pypy-base-dir [--options] -Usually you would do: package.py ../../.. pypy-VER-PLATFORM -The output is found in the directory /tmp/usession-YOURNAME/build/. +Usually you would do: package.py --version-name pypy-VER-PLATFORM +The output is found in the directory from --builddir, +by default /tmp/usession-YOURNAME/build/. 
""" import shutil import sys import os #Add toplevel repository dir to sys.path -sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))) +basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) +sys.path.insert(0,basedir) import py import fnmatch -from rpython.tool.udir import udir import subprocess if sys.version_info < (2,6): py.test.skip("requires 2.6 so far") @@ -40,15 +41,22 @@ class PyPyCNotFound(Exception): pass -def fix_permissions(basedir): +def fix_permissions(dirname): if sys.platform != 'win32': - os.system("chmod -R a+rX %s" % basedir) - os.system("chmod -R g-w %s" % basedir) + os.system("chmod -R a+rX %s" % dirname) + os.system("chmod -R g-w %s" % dirname) -def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', - copy_to_dir=None, override_pypy_c=None, nostrip=False, - withouttk=False): - assert '/' not in rename_pypy_c +def generate_license(base_file, options): + with open(base_file) as fid: + txt = fid.read() + return txt + +def package(basedir, options): + name = options.name + rename_pypy_c = options.pypy_c + override_pypy_c = options.override_pypy_c + withouttk = options.tk + basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' @@ -83,13 +91,13 @@ You can either install Tk development headers package or add --without-tk option to skip packaging binary CFFI extension.""" sys.exit(1) - #Can the dependencies be found from cffi somehow? - win_extras += ['tcl85.dll', 'tk85.dll'] + #Can the dependencies be found from cffi somehow? 
+ win_extras += ['tcl85.dll', 'tk85.dll'] if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] # - builddir = udir.ensure("build", dir=True) + builddir = options.builddir pypydir = builddir.ensure(name, dir=True) includedir = basedir.join('include') # Recursively copy all headers, shutil has only ignore @@ -116,7 +124,7 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - importlib_name = 'python27.lib' + importlib_name = 'python27.lib' if pypy_c.dirpath().join(importlib_name).check(): shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), str(pypydir.join('include/python27.lib'))) @@ -153,7 +161,7 @@ for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) for file in ['_testcapimodule.c', '_ctypes_test.c']: - shutil.copyfile(str(basedir.join('lib_pypy', file)), + shutil.copyfile(str(basedir.join('lib_pypy', file)), str(pypydir.join('lib_pypy', file))) # spdir = pypydir.ensure('site-packages', dir=True) @@ -167,17 +175,17 @@ for source, target in binaries: archive = bindir.join(target) shutil.copy(str(source), str(archive)) + fix_permissions(builddir) + old_dir = os.getcwd() - fix_permissions(builddir) try: os.chdir(str(builddir)) - # - # 'strip' fun: see issue #587 - if not nostrip: + if not options.nostrip: for source, target in binaries: if sys.platform == 'win32': pass elif sys.platform == 'darwin': + # 'strip' fun: see issue #587 for why -x os.system("strip -x " + str(bindir.join(target))) # ignore errors else: os.system("strip " + str(bindir.join(target))) # ignore errors @@ -215,34 +223,37 @@ print "Ready in %s" % (builddir,) return builddir # for tests - -def print_usage(): - print >>sys.stderr, __doc__ - sys.exit(1) - - if __name__ == '__main__': - if len(sys.argv) == 1: - print_usage() - - args = sys.argv[1:] - kw = {} - - for i, arg in enumerate(args): - if arg == '--nostrip': - kw['nostrip'] = True - elif arg == 
'--without-tk': - kw['withouttk'] = True - elif not arg.startswith('--'): - break - else: - print_usage() + import argparse + if sys.platform == 'win32': + pypy_exe = 'pypy.exe' + license_base = os.path.join(basedir,'../local') # as on buildbot YMMV + else: + pypy_exe = 'pypy' + license_base = '/usr/share/doc' + parser = argparse.ArgumentParser() + parser.add_argument('--without-tk', dest='no_tk', action='store_true', + help='build and package the cffi tkinter module') + parser.add_argument('--without-cffi', dest='no_cffi', action='store_true', + help='do not pre-import any cffi modules') + parser.add_argument('--nostrip', dest='nostrip', action='store_true', + help='do not strip the exe, making it ~10MB larger') + parser.add_argument('--rename_pypy_c', dest='pypy_c', type=str, default=pypy_exe, + help='target executable name, defaults to "pypy"') + parser.add_argument('--license_base', type=str, default=license_base, + help='where to start looking for third party upstream licensing info') + parser.add_argument('--builddir', type=str, default='', + help='tmp dir for packaging') + options = parser.parse_args() if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): - kw['nostrip'] = True + options.nostrip = True if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"): - kw['withouttk'] = True - - args = args[i:] - package(*args, **kw) + options.tk = True + if not options.builddir: + # The import actually creates the udir directory + from rpython.tool.udir import udir + options.builddir = udir.ensure("build", dir=True) + assert '/' not in options.rename_pypy_c + package(basedir, options) From noreply at buildbot.pypy.org Wed May 21 23:32:00 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 May 2014 23:32:00 +0200 (CEST) Subject: [pypy-commit] pypy packaging: rationalize some logic, create 'generate_license()' Message-ID: <20140521213200.D9E0D1C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71656:56e2e64efe2c Date: 2014-05-21 22:49 +0300 
http://bitbucket.org/pypy/pypy/changeset/56e2e64efe2c/ Log: rationalize some logic, create 'generate_license()' diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -41,6 +41,9 @@ class PyPyCNotFound(Exception): pass +class MissingDependenciesError(Exception): + pass + def fix_permissions(dirname): if sys.platform != 'win32': os.system("chmod -R a+rX %s" % dirname) @@ -51,11 +54,26 @@ txt = fid.read() return txt +def create_cffi_import_libraries(pypy_c, options): + modules = ['_sqlite3'] + subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) + if not sys.platform == 'win32': + modules += ['_curses', 'syslog', 'gdbm', '_sqlite3'] + if not options.no_tk: + modules.append(('_tkinter')) + for module in modules: + try: + subprocess.check_call([str(pypy_c), '-c', 'import ' + module]) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building %{0} bindings failed. +You can either install development headers package or +add --without-{0} option to skip packaging binary CFFI extension.""".format(module) + raise MissingDependenciesError(module) + def package(basedir, options): name = options.name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c - withouttk = options.tk basedir = py.path.local(basedir) if override_pypy_c is None: @@ -76,23 +94,9 @@ raise PyPyCNotFound( 'Bogus path: %r does not exist (see docstring for more info)' % (os.path.dirname(str(pypy_c)),)) - win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll'] - subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) - if not sys.platform == 'win32': - subprocess.check_call([str(pypy_c), '-c', 'import _curses']) - subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - subprocess.check_call([str(pypy_c), '-c', 'import gdbm']) - if not withouttk: - try: - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) - except 
subprocess.CalledProcessError: - print >>sys.stderr, """Building Tk bindings failed. -You can either install Tk development headers package or -add --without-tk option to skip packaging binary CFFI extension.""" - sys.exit(1) - #Can the dependencies be found from cffi somehow? - win_extras += ['tcl85.dll', 'tk85.dll'] + if not options.no_cffi: + create_cffi_import_libraries(pypy_c, options) + if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -110,10 +114,11 @@ pypydir.ensure('include', dir=True) if sys.platform == 'win32': - #Don't include a mscvrXX.dll, users should get their own. - #Instructions are provided on the website. - # Can't rename a DLL: it is always called 'libpypy-c.dll' + win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', + 'libeay32.dll', 'ssleay32.dll'] + if not options.no_tk: + win_extras += ['tcl85.dll', 'tk85.dll'] for extra in win_extras: p = pypy_c.dirpath().join(extra) @@ -135,7 +140,7 @@ # XXX users will complain that they cannot compile cpyext # modules for windows, has the lib moved or are there no # exported functions in the dll so no import library is created? - if not withouttk: + if not options.no_tk: try: p = pypy_c.dirpath().join('tcl85.dll') if not p.check(): @@ -147,7 +152,7 @@ tk85.dll and tcl85.dll found, expecting to find runtime in ..\\lib directory next to the dlls, as per build instructions.""" import traceback;traceback.print_exc() - sys.exit(1) + raise MissingDependenciesError('Tk runtime') # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. 
@@ -158,11 +163,14 @@ str(pypydir.join('lib_pypy')), ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~', '*.c', '*.o')) - for file in ['LICENSE', 'README.rst']: + for file in ['README.rst',]: shutil.copy(str(basedir.join(file)), str(pypydir)) for file in ['_testcapimodule.c', '_ctypes_test.c']: shutil.copyfile(str(basedir.join('lib_pypy', file)), str(pypydir.join('lib_pypy', file))) + license = generate_license(str(basedir.join('LICENSE')), options) + with open(pypydir.join('LICENSE'), 'w') as LICENSE: + LICENSE.write(license) # spdir = pypydir.ensure('site-packages', dir=True) shutil.copy(str(basedir.join('site-packages', 'README')), str(spdir)) From noreply at buildbot.pypy.org Wed May 21 23:32:02 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 May 2014 23:32:02 +0200 (CEST) Subject: [pypy-commit] pypy packaging: move file for backward compatability Message-ID: <20140521213202.0FA211C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71657:0205dc0a32b3 Date: 2014-05-21 23:56 +0300 http://bitbucket.org/pypy/pypy/changeset/0205dc0a32b3/ Log: move file for backward compatability diff --git a/pypy/tool/release/package.py b/pypy/tool/release/create_package.py rename from pypy/tool/release/package.py rename to pypy/tool/release/create_package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/create_package.py @@ -72,6 +72,8 @@ def package(basedir, options): name = options.name + if not name: + name = 'pypy-nightly' rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c @@ -224,14 +226,14 @@ raise OSError('"tar" returned exit status %r' % e) finally: os.chdir(old_dir) - if copy_to_dir is not None: - print "Copying %s to %s" % (archive, copy_to_dir) - shutil.copy(archive, str(copy_to_dir)) + if options.targetdir is not None: + print "Copying %s to %s" % (archive, options.targetdir) + shutil.copy(archive, options.targetdir) else: print "Ready in %s" % (builddir,) return builddir # for tests -if 
__name__ == '__main__': +def create_package(args): import argparse if sys.platform == 'win32': pypy_exe = 'pypy.exe' @@ -248,11 +250,15 @@ help='do not strip the exe, making it ~10MB larger') parser.add_argument('--rename_pypy_c', dest='pypy_c', type=str, default=pypy_exe, help='target executable name, defaults to "pypy"') + parser.add_argument('--archive-name', dest='name', type=str, default='', + help='pypy-VER-PLATFORM') parser.add_argument('--license_base', type=str, default=license_base, help='where to start looking for third party upstream licensing info') parser.add_argument('--builddir', type=str, default='', help='tmp dir for packaging') - options = parser.parse_args() + parser.add_argument('--targetdir', type=str, default='', + help='destination dir for archive') + options = parser.parse_args(args) if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): options.nostrip = True @@ -265,3 +271,7 @@ options.builddir = udir.ensure("build", dir=True) assert '/' not in options.rename_pypy_c package(basedir, options) + +if __name__ == '__main__': + import sys + create_package(sys.args) From noreply at buildbot.pypy.org Wed May 21 23:32:03 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 May 2014 23:32:03 +0200 (CEST) Subject: [pypy-commit] pypy packaging: add backward compatible file for buildbots Message-ID: <20140521213203.322101C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71658:7302e6d7e108 Date: 2014-05-21 23:57 +0300 http://bitbucket.org/pypy/pypy/changeset/7302e6d7e108/ Log: add backward compatible file for buildbots diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py new file mode 100755 --- /dev/null +++ b/pypy/tool/release/package.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +from create_package import * + +if __name__ == '__main__': + import sys + args = ['create_package',] + #package.py [--nostrip] [--without-tk] root-pypy-dir [name-of-archive] [name-of-pypy-c] [destination-for-tarball] 
[pypy-c-path] + if len(sys.argv) == 1: + create_package([__file__, '-h']) + + for i, arg in enumerate(sys.argv[1:]): + if arg == '-h' or arg == '--help': + create_package([__file__, '-h']) + elif arg in ['--nostrip', '--without-tk']: + args.append(arg) + elif not arg.startswith('--'): + break + else: + create_package([__file__, '-h']) + i += 1 + if len(sys.argv) > i: + # root-pypy-dir, ignore + i += 1 + if len(sys.argv) > i: + args += ['--archive-name', sys.argv[i]] + i += 1 + if len(sys.argv) > i: + args += ['--rename_pypy_c', sys.argv[i]] + i += 1 + if len(sys.argv) > i: + args += ['--targetdir', sys.argv[i]] + i += 1 + +else: + print 'please update to use create_package directly instead' From noreply at buildbot.pypy.org Wed May 21 23:32:04 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 May 2014 23:32:04 +0200 (CEST) Subject: [pypy-commit] pypy packaging: tests pass Message-ID: <20140521213204.5D0581C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71659:3d5078cb9bc5 Date: 2014-05-22 00:28 +0300 http://bitbucket.org/pypy/pypy/changeset/3d5078cb9bc5/ Log: tests pass diff --git a/pypy/tool/release/create_package.py b/pypy/tool/release/create_package.py --- a/pypy/tool/release/create_package.py +++ b/pypy/tool/release/create_package.py @@ -65,7 +65,7 @@ try: subprocess.check_call([str(pypy_c), '-c', 'import ' + module]) except subprocess.CalledProcessError: - print >>sys.stderr, """Building %{0} bindings failed. + print >>sys.stderr, """Building {0} bindings failed. 
You can either install development headers package or add --without-{0} option to skip packaging binary CFFI extension.""".format(module) raise MissingDependenciesError(module) @@ -78,7 +78,7 @@ override_pypy_c = options.override_pypy_c basedir = py.path.local(basedir) - if override_pypy_c is None: + if not override_pypy_c: basename = 'pypy-c' if sys.platform == 'win32': basename += '.exe' @@ -171,7 +171,7 @@ shutil.copyfile(str(basedir.join('lib_pypy', file)), str(pypydir.join('lib_pypy', file))) license = generate_license(str(basedir.join('LICENSE')), options) - with open(pypydir.join('LICENSE'), 'w') as LICENSE: + with open(str(pypydir.join('LICENSE')), 'w') as LICENSE: LICENSE.write(license) # spdir = pypydir.ensure('site-packages', dir=True) @@ -226,7 +226,7 @@ raise OSError('"tar" returned exit status %r' % e) finally: os.chdir(old_dir) - if options.targetdir is not None: + if options.targetdir: print "Copying %s to %s" % (archive, options.targetdir) shutil.copy(archive, options.targetdir) else: @@ -258,6 +258,8 @@ help='tmp dir for packaging') parser.add_argument('--targetdir', type=str, default='', help='destination dir for archive') + parser.add_argument('--override_pypy_c', type=str, default='', + help='use as pypy exe, default is pypy/goal/pypy-c as exe source name') options = parser.parse_args(args) if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): @@ -269,8 +271,8 @@ # The import actually creates the udir directory from rpython.tool.udir import udir options.builddir = udir.ensure("build", dir=True) - assert '/' not in options.rename_pypy_c - package(basedir, options) + assert '/' not in options.pypy_c + return package(basedir, options) if __name__ == '__main__': import sys diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -1,6 +1,21 @@ #!/usr/bin/env python -from create_package import * +from create_package import create_package +from create_package import 
fix_permissions, USE_ZIPFILE_MODULE #for tests + +def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', + copy_to_dir=None, override_pypy_c=None, nostrip=False, + withouttk=False): + args = ['--archive-name', name, '--rename_pypy_c', rename_pypy_c] + if copy_to_dir: + args += ['--targetdir', copy_to_dir] + if override_pypy_c: + args += ['--override_pypy_c'] + if nostrip: + args += ['--nostrip'] + if withouttk: + args += ['--without-tk'] + return create_package(args) if __name__ == '__main__': import sys @@ -31,6 +46,9 @@ if len(sys.argv) > i: args += ['--targetdir', sys.argv[i]] i += 1 - + if len(sys.argv) > i: + args += ['--override_pypy_c'] + i += 1 + create_package(args) else: print 'please update to use create_package directly instead' diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -1,7 +1,7 @@ import py from pypy.conftest import pypydir -from pypy.tool.release import package +from pypy.tool.release import package, create_package from pypy.module.sys.version import CPYTHON_VERSION import tarfile, zipfile, sys @@ -36,7 +36,7 @@ assert not prefix.join('lib_pypy', 'ctypes_configure').check() assert prefix.join('LICENSE').check() assert prefix.join('README.rst').check() - if package.USE_ZIPFILE_MODULE: + if create_package.USE_ZIPFILE_MODULE: zh = zipfile.ZipFile(str(builddir.join('%s.zip' % test))) assert zh.open('%s/lib_pypy/syslog.py' % test) else: @@ -59,7 +59,7 @@ def check_include(name): if includedir.join(name).check(file=True): member = '%s/include/%s' % (test, name) - if package.USE_ZIPFILE_MODULE: + if create_package.USE_ZIPFILE_MODULE: assert zh.open(member) else: assert th.getmember(member) @@ -74,13 +74,12 @@ pypy_c.remove() def test_with_zipfile_module(): - from pypy.tool.release import package - prev = package.USE_ZIPFILE_MODULE + prev = create_package.USE_ZIPFILE_MODULE try: - package.USE_ZIPFILE_MODULE 
= True + create_package.USE_ZIPFILE_MODULE = True test_dir_structure(test='testzipfile') finally: - package.USE_ZIPFILE_MODULE = prev + create_package.USE_ZIPFILE_MODULE = prev def test_fix_permissions(tmpdir): if sys.platform == 'win32': @@ -100,7 +99,7 @@ file2.chmod(0640) pypy.chmod(0700) # - package.fix_permissions(tmpdir) + create_package.fix_permissions(tmpdir) check(mydir, 0755) check(bin, 0755) check(file1, 0644) From noreply at buildbot.pypy.org Thu May 22 04:18:28 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:28 +0200 (CEST) Subject: [pypy-commit] pypy default: OperationError -> oefmt in memoryobject.py Message-ID: <20140522021828.7F9C01C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r71660:e2f6a65a50a7 Date: 2014-05-22 02:08 +0200 http://bitbucket.org/pypy/pypy/changeset/e2f6a65a50a7/ Log: OperationError -> oefmt in memoryobject.py diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -5,7 +5,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty @@ -83,7 +83,7 @@ def descr_getitem(self, space, w_index): start, stop, step = space.decode_index(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only return space.wrap(self.buf.getitem(start)) res = self.getslice(start, stop) @@ -91,15 +91,14 @@ def descr_setitem(self, space, w_index, w_obj): if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "cannot modify read-only memory")) - start, stop, step, size = 
space.decode_index4(w_index, self.buf.getlength()) + raise oefmt(space.w_TypeError, "cannot modify read-only memory") + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) if value.getlength() != size: - raise OperationError(space.w_ValueError, space.wrap( - "cannot modify size of memoryview object")) + raise oefmt(space.w_ValueError, + "cannot modify size of memoryview object") if step == 0: # index only self.buf.setitem(start, value.getitem(0)) elif step == 1: From noreply at buildbot.pypy.org Thu May 22 04:18:29 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Inline W_MemoryView.getslice() and simplify. Message-ID: <20140522021829.BB7D01C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r71661:6e9376d22e0e Date: 2014-05-22 02:37 +0200 http://bitbucket.org/pypy/pypy/changeset/6e9376d22e0e/ Log: Inline W_MemoryView.getslice() and simplify. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -61,15 +61,6 @@ def getlength(self): return self.buf.getlength() - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = SubBuffer(self.buf, start, size) - return W_MemoryView(buf) - def descr_tobytes(self, space): return space.wrap(self.as_str()) @@ -81,13 +72,14 @@ return space.newlist(result) def descr_getitem(self, space, w_index): - start, stop, step = space.decode_index(w_index, self.getlength()) + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only return space.wrap(self.buf.getitem(start)) - res = self.getslice(start, stop) - return space.wrap(res) + else: + buf = SubBuffer(self.buf, start, size) + return W_MemoryView(buf) def descr_setitem(self, space, w_index, w_obj): if self.buf.readonly: From noreply at buildbot.pypy.org Thu May 22 04:18:31 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:31 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Inline and kill _buffer_setitem. Message-ID: <20140522021831.08D741C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71662:0114d1df7470 Date: 2014-05-22 03:33 +0200 http://bitbucket.org/pypy/pypy/changeset/0114d1df7470/ Log: Inline and kill _buffer_setitem. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -10,23 +10,6 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty -def _buffer_setitem(space, buf, w_index, w_obj): - if buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "cannot modify read-only memory")) - start, stop, step, size = space.decode_index4(w_index, buf.getlength()) - if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) - value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size: - raise OperationError(space.w_ValueError, space.wrap( - "cannot modify size of memoryview object")) - if step == 0: # index only - buf.setitem(start, value.getitem(0)) - elif step == 1: - buf.setslice(start, value.as_str()) - - class W_MemoryView(W_Root): """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. @@ -119,7 +102,20 @@ def descr_setitem(self, space, w_index, w_obj): self._check_released(space) - _buffer_setitem(space, self.buf, w_index, w_obj) + if self.buf.readonly: + raise OperationError(space.w_TypeError, space.wrap( + "cannot modify read-only memory")) + start, stop, step, size = space.decode_index4(w_index, self.getlength()) + if step not in (0, 1): + raise OperationError(space.w_NotImplementedError, space.wrap("")) + value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) + if value.getlength() != size: + raise OperationError(space.w_ValueError, space.wrap( + "cannot modify size of memoryview object")) + if step == 0: # index only + self.buf.setitem(start, value.getitem(0)) + elif step == 1: + self.buf.setslice(start, value.as_str()) def descr_len(self, space): self._check_released(space) From noreply at buildbot.pypy.org Thu May 22 04:18:32 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:32 +0200 (CEST) Subject: [pypy-commit] pypy 
py3k-memoryview: OperationError -> oefmt in memoryobject.py Message-ID: <20140522021832.3B6891C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71663:8042bfb1431d Date: 2014-05-22 02:08 +0200 http://bitbucket.org/pypy/pypy/changeset/8042bfb1431d/ Log: OperationError -> oefmt in memoryobject.py diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -5,7 +5,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty @@ -80,8 +80,8 @@ self._check_released(space) buf = self.buf if buf.format != 'B': - raise OperationError(space.w_NotImplementedError, space.wrap( - "tolist() only supports byte views")) + raise oefmt(space.w_NotImplementedError, + "tolist() only supports byte views") result = [] for i in range(buf.getlength()): result.append(space.wrap(ord(buf.getitem(i)[0]))) @@ -91,7 +91,7 @@ self._check_released(space) start, stop, step = space.decode_index(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only a = start * self.buf.itemsize b = a + self.buf.itemsize @@ -103,15 +103,14 @@ def descr_setitem(self, space, w_index, w_obj): self._check_released(space) if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "cannot modify read-only memory")) + raise oefmt(space.w_TypeError, "cannot modify read-only memory") start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise 
oefmt(space.w_NotImplementedError, "") value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) if value.getlength() != size: - raise OperationError(space.w_ValueError, space.wrap( - "cannot modify size of memoryview object")) + raise oefmt(space.w_ValueError, + "cannot modify size of memoryview object") if step == 0: # index only self.buf.setitem(start, value.getitem(0)) elif step == 1: @@ -161,8 +160,8 @@ def _check_released(self, space): if self.buf is None: - raise OperationError(space.w_ValueError, space.wrap( - "operation forbidden on released memoryview object")) + raise oefmt(space.w_ValueError, + "operation forbidden on released memoryview object") def descr_enter(self, space): self._check_released(space) From noreply at buildbot.pypy.org Thu May 22 04:18:33 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Inline W_MemoryView.getslice() and simplify. Message-ID: <20140522021833.7BA7A1C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71664:8faf41ae175d Date: 2014-05-22 02:37 +0200 http://bitbucket.org/pypy/pypy/changeset/8faf41ae175d/ Log: Inline W_MemoryView.getslice() and simplify. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -62,16 +62,6 @@ def getlength(self): return self.buf.getlength() // self.buf.itemsize - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = SubBuffer(self.buf, start * self.buf.itemsize, - size * self.buf.itemsize) - return W_MemoryView(buf) - def descr_tobytes(self, space): self._check_released(space) return space.wrapbytes(self.as_str()) @@ -89,7 +79,7 @@ def descr_getitem(self, space, w_index): self._check_released(space) - start, stop, step = space.decode_index(w_index, self.getlength()) + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only @@ -97,8 +87,10 @@ b = a + self.buf.itemsize return space.wrapbytes( ''.join([self.buf.getitem(i) for i in range(a, b)])) - res = self.getslice(start, stop) - return space.wrap(res) + else: + buf = SubBuffer(self.buf, start * self.buf.itemsize, + size * self.buf.itemsize) + return W_MemoryView(buf) def descr_setitem(self, space, w_index, w_obj): self._check_released(space) From noreply at buildbot.pypy.org Thu May 22 04:18:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:18:34 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Test and fix memoryview's setslice to support itemsize != 1. Message-ID: <20140522021834.A41C91C011F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71665:6f1e8a720ef2 Date: 2014-05-22 04:17 +0200 http://bitbucket.org/pypy/pypy/changeset/6f1e8a720ef2/ Log: Test and fix memoryview's setslice to support itemsize != 1. 
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -100,13 +100,10 @@ if step not in (0, 1): raise oefmt(space.w_NotImplementedError, "") value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size: + if value.getlength() != size * self.buf.itemsize: raise oefmt(space.w_ValueError, "cannot modify size of memoryview object") - if step == 0: # index only - self.buf.setitem(start, value.getitem(0)) - elif step == 1: - self.buf.setslice(start, value.as_str()) + self.buf.setslice(start * self.buf.itemsize, value.as_str()) def descr_len(self, space): self._check_released(space) diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -127,3 +127,12 @@ assert cm is v raises(ValueError, bytes, v) assert "released memory" in repr(v) + + def test_int_array_buffer(self): + import array + m = memoryview(array.array('i', list(range(10)))) + assert len(m) == 10 + assert len(m.tobytes()) == 40 + assert m[0] == b'\x00\x00\x00\x00' + m[0] = b'\x00\x00\x00\x01' + assert m[0] == b'\x00\x00\x00\x01' From noreply at buildbot.pypy.org Thu May 22 04:32:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 04:32:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: hg merge py3k Message-ID: <20140522023222.5B2EB1C03C4@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71666:6f35f19922e2 Date: 2014-05-22 04:29 +0200 http://bitbucket.org/pypy/pypy/changeset/6f35f19922e2/ Log: hg merge py3k diff too long, truncating to 2000 out of 5503 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,7 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 
ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,61 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +146,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +168,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +192,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -205,6 +214,7 @@ Travis Francis Athougies Kristjan Valur Jonsson Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +228,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -235,27 +246,35 @@ Anders Sigfridsson Yasir Suhail Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +282,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,29 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) + + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) def test_main(): run_unittest(TestGdbm) diff --git 
a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,174 @@ +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' +#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... +#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) + +class error(Exception): + pass + +def _fromstr(key): + if not isinstance(key, str): + raise TypeError("gdbm mappings have string indices only") + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 + if not res: + self._raise_from_errno() + 
self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(os.strerror(ffi.errno)) + raise error(lib.gdbm_strerror(lib.gdbm_errno)) + + def __len__(self): + if self.size < 0: + self.size = len(self.keys()) + return self.size + + def __setitem__(self, key, value): + self._check_closed() + self._size = -1 + r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res + + def keys(self): + self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(str(ffi.buffer(key.dptr, key.dsize))) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error("GDBM object has already been closed") + + __del__ = close + + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + 
+def open(filename, flags='r', mode=0o666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error("Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,10 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. 
-* `Release 2.2.1`_: the latest official release +* `Release 2.3.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -100,6 +100,8 @@ ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. + Note that using a : in fname is a bad idea, Windows + users, beware. ``:``\ *fname* Full logging, including ``debug_print``. @@ -113,6 +115,11 @@ generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. +``PYPY_IRC_TOPIC`` + If set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. + + .. include:: ../gc_info.rst :start-line: 7 diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst --- a/pypy/doc/release-2.3.0.rst +++ b/pypy/doc/release-2.3.0.rst @@ -93,7 +93,7 @@ * Fix handling of tp_name for type objects .. _`HippyVM`: http://www.hippyvm.com -.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.0.html New Platforms and Features diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -1,70 +1,78 @@ -====================== -Transactional Memory -====================== + +============================= +Software Transactional Memory +============================= .. 
contents:: This page is about ``pypy-stm``, a special in-development version of PyPy which can run multiple independent CPU-hungry threads in the same -process in parallel. It is side-stepping what is known in the Python -world as the "global interpreter lock (GIL)" problem. +process in parallel. It is a solution to what is known in the Python +world as the "global interpreter lock (GIL)" problem --- it is an +implementation of Python without the GIL. -"STM" stands for Software Transactional Memory, the technique used +"STM" stands for Software `Transactional Memory`_, the technique used internally. This page describes ``pypy-stm`` from the perspective of a user, describes work in progress, and finally gives references to more implementation details. -This work was done mostly by Remi Meier and Armin Rigo. Thanks to all -donors for crowd-funding the work so far! Please have a look at the -`2nd call for donation`_. +This work was done by Remi Meier and Armin Rigo. Thanks to all donors +for crowd-funding the work so far! Please have a look at the `2nd call +for donation`_. +.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory .. _`2nd call for donation`: http://pypy.org/tmdonate2.html Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% slower than a -regular PyPy, comparing the JIT version in both cases. It is called +``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +listed below, it should be in theory within 20%-50% slower than a +regular PyPy, comparing the JIT version in both cases (but see below!). +It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). -What you get in exchange for this slow-down is that ``pypy-stm`` runs -any multithreaded Python program on multiple CPUs at once. 
Programs -running two threads or more in parallel should ideally run faster than -in a regular PyPy, either now or soon as issues are fixed. In one way, -that's all there is to it: this is a GIL-less Python, feel free to -`download and try it`__. However, the deeper idea behind the -``pypy-stm`` project is to improve what is so far the state-of-the-art -for using multiple CPUs, which for cases where separate processes don't -work is done by writing explicitly multi-threaded programs. Instead, -``pypy-stm`` is pushing forward an approach to *hide* the threads, as -described below in `atomic sections`_. +The benefit is that the resulting ``pypy-stm`` can execute multiple +threads of Python code in parallel. Programs running two threads or +more in parallel should ideally run faster than in a regular PyPy +(either now, or soon as bugs are fixed). +* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use + it as a drop-in replacement and multithreaded programs will run on + multiple cores. -.. __: +* ``pypy-stm`` does not impose any special API to the user, but it + provides a new pure Python module called `transactional_memory`_ with + features to inspect the state or debug conflicts_ that prevent + parallelization. This module can also be imported on top of a non-STM + PyPy or CPython. -Current status -============== +* Building on top of the way the GIL is removed, we will talk + about `Atomic sections, Transactions, etc.: a better way to write + parallel programs`_. + + +Getting Started +=============== **pypy-stm requires 64-bit Linux for now.** Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu 12.04 binary -here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, -but not stripped of debug symbols). 
The current version supports four -"segments", which means that it will run up to four threads in parallel, -in other words it is running a thread pool up to 4 threads emulating normal -threads. +interested in trying it out, you can download a Ubuntu binary here__ +(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are +release mode, but not stripped of debug symbols). The current version +supports four "segments", which means that it will run up to four +threads in parallel. To build a version from sources, you first need to compile a custom -version of clang; we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 ...`` +version of clang(!); we recommend downloading `llvm and clang like +described here`__, but at revision 201645 (use ``svn co -r 201645 `` for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for the very extensive usage that pypy-stm does of a -clang-only feature (without them, you get crashes of clang). Then get +they are fixes for a clang-only feature that hasn't been used so heavily +in the past (without the patches, you get crashes of clang). Then get the branch `stmgc-c7`_ of PyPy and run:: rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py @@ -75,23 +83,31 @@ .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ -Caveats: +.. _caveats: -* So far, small examples work fine, but there are still a number of - bugs. We're busy fixing them. +Current status +-------------- + +* So far, small examples work fine, but there are still a few bugs. + We're busy fixing them as we find them; feel free to `report bugs`_. + +* It runs with an overhead as low as 20% on examples like "richards". + There are also other examples with higher overheads --up to 10x for + "translate.py"-- which we are still trying to understand. One suspect + is our partial GC implementation, see below. 
* Currently limited to 1.5 GB of RAM (this is just a parameter in - `core.h`__). Memory overflows are not detected correctly, so may - cause segmentation faults. + `core.h`__). Memory overflows are not correctly handled; they cause + segfaults. -* The JIT warm-up time is abysmal (as opposed to the regular PyPy's, - which is "only" bad). Moreover, you should run it with a command like - ``pypy-stm --jit trace_limit=60000 args...``; the default value of - 6000 for ``trace_limit`` is currently too low (6000 should become - reasonable again as we improve). Also, in order to produce machine - code, the JIT needs to enter a special single-threaded mode for now. - This all means that you *will* get very bad performance results if - your program doesn't run for *many* seconds for now. +* The JIT warm-up time improved recently but is still bad. In order to + produce machine code, the JIT needs to enter a special single-threaded + mode for now. This means that you will get bad performance results if + your program doesn't run for several seconds, where *several* can mean + *many.* When trying benchmarks, be sure to check that you have + reached the warmed state, i.e. the performance is not improving any + more. This should be clear from the fact that as long as it's + producing more machine code, ``pypy-stm`` will run on a single core. * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large @@ -108,111 +124,197 @@ * The STM system is based on very efficient read/write barriers, which are mostly done (their placement could be improved a bit in JIT-generated machine code). But the overall bookkeeping logic could - see more improvements (see Statistics_ below). - -* You can use `atomic sections`_, but the most visible missing thing is - that you don't get reports about the "conflicts" you get. This would - be the first thing that you need in order to start using atomic - sections more extensively. 
Also, for now: for better results, try to - explicitly force a transaction break just before (and possibly after) - each large atomic section, with ``time.sleep(0)``. + see more improvements (see `Low-level statistics`_ below). * Forking the process is slow because the complete memory needs to be - copied manually right now. + copied manually. A warning is printed to this effect. -* Very long-running processes should eventually crash on an assertion - error because of a non-implemented overflow of an internal 29-bit - number, but this requires at the very least ten hours --- more - probably, several days or more. +* Very long-running processes (on the order of days) will eventually + crash on an assertion error because of a non-implemented overflow of + an internal 29-bit number. .. _`report bugs`: https://bugs.pypy.org/ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h -Statistics +User Guide ========== + -When a non-main thread finishes, you get statistics printed to stderr, -looking like that:: +Drop-in replacement +------------------- - thread 0x7f73377fe600: - outside transaction 42182 0.506 s - run current 85466 0.000 s - run committed 34262 3.178 s - run aborted write write 6982 0.083 s - run aborted write read 550 0.005 s - run aborted inevitable 388 0.010 s - run aborted other 0 0.000 s - wait free segment 0 0.000 s - wait write read 78 0.027 s - wait inevitable 887 0.490 s - wait other 0 0.000 s - bookkeeping 51418 0.606 s - minor gc 162970 1.135 s - major gc 1 0.019 s - sync pause 59173 1.738 s - spin loop 129512 0.094 s +Multithreaded, CPU-intensive Python programs should work unchanged on +``pypy-stm``. They will run using multiple CPU cores in parallel. -The first number is a counter; the second number gives the associated -time (the amount of real time that the thread was in this state; the sum -of all the times should be equal to the total time between the thread's -start and the thread's end). 
The most important points are "run -committed", which gives the amount of useful work, and "outside -transaction", which should give the time spent e.g. in library calls -(right now it seems to be a bit larger than that; to investigate). -Everything else is overhead of various forms. (Short-, medium- and -long-term future work involves reducing this overhead :-) +The existing semantics of the GIL (Global Interpreter Lock) are +unchanged: although running on multiple cores in parallel, ``pypy-stm`` +gives the illusion that threads are run serially, with switches only +occurring between bytecodes, not in the middle of them. Programs can +rely on this: using ``shared_list.append()/pop()`` or +``shared_dict.setdefault()`` as synchronization mecanisms continues to +work as expected. -These statistics are not printed out for the main thread, for now. +This works by internally considering the points where a standard PyPy or +CPython would release the GIL, and replacing them with the boundaries of +"transaction". Like their database equivalent, multiple transactions +can execute in parallel, but will commit in some serial order. They +appear to behave as if they were completely run in this serialization +order. Atomic sections -=============== +--------------- -While one of the goal of pypy-stm is to give a GIL-free but otherwise -unmodified Python, the other goal is to push for a better way to use -multithreading. For this, you (as the Python programmer) get an API -in the ``__pypy__.thread`` submodule: +PyPy supports *atomic sections,* which are blocks of code which you want +to execute without "releasing the GIL". *This is experimental and may +be removed in the future.* In STM terms, this means blocks of code that +are executed while guaranteeing that the transaction is not interrupted +in the middle. -* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in - a ``with __pypy__.thread.atomic:`` statement). 
It runs the whole - block of code without breaking the current transaction --- from - the point of view of a regular CPython/PyPy, this is equivalent to - saying that the GIL will not be released at all between the start and - the end of this block of code. +Here is a usage example:: -The obvious usage is to use atomic blocks in the same way as one would -use locks: to protect changes to some shared data, you do them in a -``with atomic`` block, just like you would otherwise do them in a ``with -mylock`` block after ``mylock = thread.allocate_lock()``. This allows -you not to care about acquiring the correct locks in the correct order; -it is equivalent to having only one global lock. This is how -transactional memory is `generally described`__: as a way to efficiently -execute such atomic blocks, running them in parallel while giving the -illusion that they run in some serial order. + with __pypy__.thread.atomic: + assert len(lst1) == 10 + x = lst1.pop(0) + lst1.append(x) -.. __: http://en.wikipedia.org/wiki/Transactional_memory +In this (bad) example, we are sure that the item popped off one end of +the list is appened again at the other end atomically. It means that +another thread can run ``len(lst1)`` or ``x in lst1`` without any +particular synchronization, and always see the same results, +respectively ``10`` and ``True``. It will never see the intermediate +state where ``lst1`` only contains 9 elements. Atomic sections are +similar to re-entrant locks (they can be nested), but additionally they +protect against the concurrent execution of *any* code instead of just +code that happens to be protected by the same lock in other threads. -However, the less obvious intended usage of atomic sections is as a -wide-ranging replacement of explicit threads. You can turn a program -that is not multi-threaded at all into a program that uses threads -internally, together with large atomic sections to keep the behavior -unchanged. 
This capability can be hidden in a library or in the -framework you use; the end user's code does not need to be explicitly -aware of using threads. For a simple example of this, see -`transaction.py`_ in ``lib_pypy``. The idea is that if you have a -program where the function ``f(key, value)`` runs on every item of some -big dictionary, you can replace the loop with:: +Note that the notion of atomic sections is very strong. If you write +code like this:: + + with __pypy__.thread.atomic: + time.sleep(10) + +then, if you think about it as if we had a GIL, you are executing a +10-seconds-long atomic transaction without releasing the GIL at all. +This prevents all other threads from progressing at all. While it is +not strictly true in ``pypy-stm``, the exact rules for when other +threads can progress or not are rather complicated; you have to consider +it likely that such a piece of code will eventually block all other +threads anyway. + +Note that if you want to experiment with ``atomic``, you may have to add +manually a transaction break just before the atomic block. This is +because the boundaries of the block are not guaranteed to be the +boundaries of the transaction: the latter is at least as big as the +block, but maybe bigger. Therefore, if you run a big atomic block, it +is a good idea to break the transaction just before. This can be done +e.g. by the hack of calling ``time.sleep(0)``. (This may be fixed at +some point.) + +There are also issues with the interaction of locks and atomic blocks. +This can be seen if you write to files (which have locks), including +with a ``print`` to standard output. If one thread tries to acquire a +lock while running in an atomic block, and another thread has got the +same lock, then the former may fail with a ``thread.error``. The reason +is that "waiting" for some condition to become true --while running in +an atomic block-- does not really make sense. 
For now you can work +around it by making sure that, say, all your prints are either in an +``atomic`` block or none of them are. (This kind of issue is +theoretically hard to solve.) + + +Locks +----- + +**Not Implemented Yet** + +The thread module's locks have their basic semantic unchanged. However, +using them (e.g. in ``with my_lock:`` blocks) starts an alternative +running mode, called `Software lock elision`_. This means that PyPy +will try to make sure that the transaction extends until the point where +the lock is released, and if it succeeds, then the acquiring and +releasing of the lock will be "elided". This means that in this case, +the whole transaction will technically not cause any write into the lock +object --- it was unacquired before, and is still unacquired after the +transaction. + +This is specially useful if two threads run ``with my_lock:`` blocks +with the same lock. If they each run a transaction that is long enough +to contain the whole block, then all writes into the lock will be elided +and the two transactions will not conflict with each other. As usual, +they will be serialized in some order: one of the two will appear to run +before the other. Simply, each of them executes an "acquire" followed +by a "release" in the same transaction. As explained above, the lock +state goes from "unacquired" to "unacquired" and can thus be left +unchanged. + +This approach can gracefully fail: unlike atomic sections, there is no +guarantee that the transaction runs until the end of the block. If you +perform any input/output while you hold the lock, the transaction will +end as usual just before the input/output operation. If this occurs, +then the lock elision mode is cancelled and the lock's "acquired" state +is really written. + +Even if the lock is really acquired already, a transaction doesn't have +to wait for it to become free again. It can enter the elision-mode anyway +and tentatively execute the content of the block. 
It is only at the end, +when trying to commit, that the thread will pause. As soon as the real +value stored in the lock is switched back to "unacquired", it can then +proceed and attempt to commit its already-executed transaction (which +can fail and abort and restart from the scratch, as usual). + +Note that this is all *not implemented yet,* but we expect it to work +even if you acquire and release several locks. The elision-mode +transaction will extend until the first lock you acquired is released, +or until the code performs an input/output or a wait operation (for +example, waiting for another lock that is currently not free). In the +common case of acquiring several locks in nested order, they will all be +elided by the same transaction. + +.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410 + + +Atomic sections, Transactions, etc.: a better way to write parallel programs +---------------------------------------------------------------------------- + +(This section is based on locks as we plan to implement them, but also +works with the existing atomic sections.) + +In the cases where elision works, the block of code can run in parallel +with other blocks of code *even if they are protected by the same lock.* +You still get the illusion that the blocks are run sequentially. This +works even for multiple threads that run each a series of such blocks +and nothing else, protected by one single global lock. This is +basically the Python application-level equivalent of what was done with +the interpreter in ``pypy-stm``: while you think you are writing +thread-unfriendly code because of this global lock, actually the +underlying system is able to make it run on multiple cores anyway. + +This capability can be hidden in a library or in the framework you use; +the end user's code does not need to be explicitly aware of using +threads. For a simple example of this, there is `transaction.py`_ in +``lib_pypy``. 
The idea is that you write, or already have, some program +where the function ``f(key, value)`` runs on every item of some big +dictionary, say:: + + for key, value in bigdict.items(): + f(key, value) + +Then you simply replace the loop with:: for key, value in bigdict.items(): transaction.add(f, key, value) transaction.run() This code runs the various calls to ``f(key, value)`` using a thread -pool, but every single call is done in an atomic section. The end -result is that the behavior should be exactly equivalent: you don't get -any extra multithreading issue. +pool, but every single call is executed under the protection of a unique +lock. The end result is that the behavior is exactly equivalent --- in +fact it makes little sense to do it in this way on a non-STM PyPy or on +CPython. But on ``pypy-stm``, the various locked calls to ``f(key, +value)`` can tentatively be executed in parallel, even if the observable +result is as if they were executed in some serial order. This approach hides the notion of threads from the end programmer, including all the hard multithreading-related issues. This is not the @@ -223,41 +325,176 @@ only requires that the end programmer identifies where this parallelism is likely to be found, and communicates it to the system, using for example the ``transaction.add()`` scheme. - + .. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP -================== -Other APIs in pypy-stm: +.. _`transactional_memory`: -* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments" - in this pypy-stm. This is the limit above which more threads will not - be able to execute on more cores. (Right now it is limited to 4 due - to inter-segment overhead, but should be increased in the future. 
It +API of transactional_memory +--------------------------- + +The new pure Python module ``transactional_memory`` runs on both CPython +and PyPy, both with and without STM. It contains: + +* ``getsegmentlimit()``: return the number of "segments" in + this pypy-stm. This is the limit above which more threads will not be + able to execute on more cores. (Right now it is limited to 4 due to + inter-segment overhead, but should be increased in the future. It should also be settable, and the default value should depend on the - number of actual CPUs.) + number of actual CPUs.) If STM is not available, this returns 1. -* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but - raises an exception if you attempt to nest it inside another - ``atomic``. +* ``print_abort_info(minimum_time=0.0)``: debugging help. Each thread + remembers the longest abort or pause it did because of cross-thread + contention_. This function prints it to ``stderr`` if the time lost + is greater than ``minimum_time`` seconds. The record is then + cleared, to make it ready for new events. This function returns + ``True`` if it printed a report, and ``False`` otherwise. -* ``__pypy__.thread.signals_enabled``: a context manager that runs - its block with signals enabled. By default, signals are only - enabled in the main thread; a non-main thread will not receive - signals (this is like CPython). Enabling signals in non-main threads - is useful for libraries where threads are hidden and the end user is - not expecting his code to run elsewhere than in the main thread. -Note that all of this API is (or will be) implemented in a regular PyPy -too: for example, ``with atomic`` will simply mean "don't release the -GIL" and ``getsegmentlimit()`` will return 1. 
+API of __pypy__.thread +---------------------- -================== +The ``__pypy__.thread`` submodule is a built-in module of PyPy that +contains a few internal built-in functions used by the +``transactional_memory`` module, plus the following: + +* ``__pypy__.thread.atomic``: a context manager to run a block in + fully atomic mode, without "releasing the GIL". (May be eventually + removed?) + +* ``__pypy__.thread.signals_enabled``: a context manager that runs its + block with signals enabled. By default, signals are only enabled in + the main thread; a non-main thread will not receive signals (this is + like CPython). Enabling signals in non-main threads is useful for + libraries where threads are hidden and the end user is not expecting + his code to run elsewhere than in the main thread. + + +.. _contention: + +Conflicts +--------- + +Based on Software Transactional Memory, the ``pypy-stm`` solution is +prone to "conflicts". To repeat the basic idea, threads execute their code +speculatively, and at known points (e.g. between bytecodes) they +coordinate with each other to agree on which order their respective +actions should be "committed", i.e. become globally visible. Each +duration of time between two commit-points is called a transaction. + +A conflict occurs when there is no consistent ordering. The classical +example is if two threads both tried to change the value of the same +global variable. In that case, only one of them can be allowed to +proceed, and the other one must be either paused or aborted (restarting +the transaction). If this occurs too often, parallelization fails. + +How much actual parallelization a multithreaded program can see is a bit +subtle. Basically, a program not using ``__pypy__.thread.atomic`` or +eliding locks, or doing so for very short amounts of time, will +parallelize almost freely (as long as it's not some artificial example +where, say, all threads try to increase the same global counter and do +nothing else). 
+ +However, if the program requires longer transactions, it comes +with less obvious rules. The exact details may vary from version to +version, too, until they are a bit more stabilized. Here is an +overview. + +Parallelization works as long as two principles are respected. The +first one is that the transactions must not *conflict* with each other. +The most obvious sources of conflicts are threads that all increment a +global shared counter, or that all store the result of their +computations into the same list --- or, more subtly, that all ``pop()`` +the work to do from the same list, because that is also a mutation of +the list. (It is expected that some STM-aware library will eventually +be designed to help with conflict problems, like a STM-aware queue.) + +A conflict occurs as follows: when a transaction commits (i.e. finishes +successfully) it may cause other transactions that are still in progress +to abort and retry. This is a waste of CPU time, but even in the worst +case scenario it is not worse than a GIL, because at least one +transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and +1 CPU doing a job that commits successfully). + +Conflicts do occur, of course, and it is pointless to try to avoid them +all. For example they can be abundant during some warm-up phase. What +is important is to keep them rare enough in total. + +Another issue is that of avoiding long-running so-called "inevitable" +transactions ("inevitable" is taken in the sense of "which cannot be +avoided", i.e. transactions which cannot abort any more). Transactions +like that should only occur if you use ``__pypy__.thread.atomic``, +generally because of I/O in atomic blocks. They work, but the +transaction is turned inevitable before the I/O is performed. For all +the remaining execution time of the atomic block, they will impede +parallel work. The best is to organize the code so that such operations +are done completely outside ``__pypy__.thread.atomic``. 
+ +(This is related to the fact that blocking I/O operations are +discouraged with Twisted, and if you really need them, you should do +them on their own separate thread.) + +In case of lock elision, we don't get long-running inevitable +transactions, but a different problem can occur: doing I/O cancels lock +elision, and the lock turns into a real lock, preventing other threads +from committing if they also need this lock. (More about it when lock +elision is implemented and tested.) + + + +Implementation +============== + +XXX this section mostly empty for now + + +Low-level statistics +-------------------- + +When a non-main thread finishes, you get low-level statistics printed to +stderr, looking like that:: + + thread 0x7f73377fe600: + outside transaction 42182 0.506 s + run current 85466 0.000 s + run committed 34262 3.178 s + run aborted write write 6982 0.083 s + run aborted write read 550 0.005 s + run aborted inevitable 388 0.010 s + run aborted other 0 0.000 s + wait free segment 0 0.000 s + wait write read 78 0.027 s + wait inevitable 887 0.490 s + wait other 0 0.000 s + sync commit soon 1 0.000 s + bookkeeping 51418 0.606 s + minor gc 162970 1.135 s + major gc 1 0.019 s + sync pause 59173 1.738 s + longest recordered marker 0.000826 s + "File "x.py", line 5, in f" + +On each line, the first number is a counter, and the second number gives +the associated time --- the amount of real time that the thread was in +this state. The sum of all the times should be equal to the total time +between the thread's start and the thread's end. The most important +points are "run committed", which gives the amount of useful work, and +"outside transaction", which should give the time spent e.g. in library +calls (right now it seems to be larger than that; to investigate). The +various "run aborted" and "wait" entries are time lost due to +conflicts_. Everything else is overhead of various forms. 
(Short-, +medium- and long-term future work involves reducing this overhead :-) + +The last two lines are special; they are an internal marker read by +``transactional_memory.print_abort_info()``. + +These statistics are not printed out for the main thread, for now. Reference to implementation details -=================================== +----------------------------------- The core of the implementation is in a separate C library called stmgc_, in the c7_ subdirectory. Please see the `README.txt`_ for more @@ -282,3 +519,15 @@ .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py + + + +See also +======== + +See also +https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst +(section about STM). + + +.. include:: _ref.txt diff --git a/pypy/doc/whatsnew-2.3.0.rst b/pypy/doc/whatsnew-2.3.0.rst --- a/pypy/doc/whatsnew-2.3.0.rst +++ b/pypy/doc/whatsnew-2.3.0.rst @@ -167,3 +167,6 @@ .. branch: fix-tpname Changes hacks surrounding W_TypeObject.name to match CPython's tp_name + +.. branch: tkinter_osx_packaging +OS/X specific header path diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,4 +3,4 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: ec864bd08d50 +.. 
startrev: f556d32f8319 diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -56,7 +56,10 @@ try: space.call_function(w_run_toplevel, w_call_startup_gateway) if rlocale.HAVE_LANGINFO: - rlocale.setlocale(rlocale.LC_ALL, '') + try: + rlocale.setlocale(rlocale.LC_ALL, '') + except rlocale.LocaleError: + pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) w_argv = space.newlist([space.fsdecode(space.wrapbytes(s)) for s in argv[1:]]) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -37,6 +37,9 @@ PYTHONPATH : %r-separated list of directories prefixed to the default module search path. The result is sys.path. PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. +PYPY_IRC_TOPIC: if set to a non-empty value, print a random #pypy IRC + topic at startup of interactive mode. +PYPYLOG: If set to a non-empty value, enable logging. 
""" try: @@ -113,6 +116,7 @@ except BaseException as e: try: + initstdio() stderr = sys.stderr print('Error calling sys.excepthook:', file=stderr) originalexcepthook(type(e), e, e.__traceback__) @@ -678,7 +682,11 @@ if inspect_requested(): try: from _pypy_interact import interactive_console - success = run_toplevel(interactive_console, mainmodule, quiet) + pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' or ( + readenv and os.getenv('PYPY_IRC_TOPIC')) + success = run_toplevel(interactive_console, mainmodule, + quiet=quiet or not irc_topic) except SystemExit as e: status = e.code else: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -7,11 +7,8 @@ from rpython.tool.udir import udir from contextlib import contextmanager from pypy.conftest import pypydir -from pypy.module.sys.version import PYPY_VERSION from lib_pypy._pypy_interact import irc_header -is_release = PYPY_VERSION[3] == "final" - python3 = os.environ.get("PYTHON3", "python3") @@ -21,7 +18,6 @@ stdout=subprocess.PIPE) return p.stdout.read().rstrip() banner = get_banner() -print repr(banner) app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') app_main = os.path.abspath(app_main) @@ -255,10 +251,6 @@ child = self.spawn([]) child.expect('Python ') # banner child.expect('>>> ') # prompt - if is_release: - assert irc_header not in child.before - else: - assert irc_header in child.before child.sendline('[6*7]') child.expect(re.escape('[42]')) child.sendline('def f(x):') @@ -278,6 +270,22 @@ child.sendline("'' in sys.path") child.expect("True") + def test_yes_irc_topic(self, monkeypatch): + monkeypatch.setenv('PYPY_IRC_TOPIC', '1') + child = self.spawn([]) + child.expect(irc_header) # banner + + def test_maybe_irc_topic(self): + import sys + pypy_version_info = 
getattr(sys, 'pypy_version_info', sys.version_info) + irc_topic = pypy_version_info[3] != 'final' + child = self.spawn([]) + child.expect('>>>') # banner + if irc_topic: + assert irc_header in child.before + else: + assert irc_header not in child.before + def test_help(self): # test that -h prints the usage, including the name of the executable # which should be /full/path/to/app_main.py in this case @@ -1048,6 +1056,7 @@ # ---------------------------------------- from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION cpy_ver = '%d' % CPYTHON_VERSION[0] + from lib_pypy._pypy_interact import irc_header goal_dir = os.path.dirname(app_main) # build a directory hierarchy like which contains both bin/pypy-c and @@ -1067,6 +1076,7 @@ self.w_fake_exe = self.space.wrap(str(fake_exe)) self.w_expected_path = self.space.wrap(expected_path) self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final") self.w_tmp_dir = self.space.wrap(tmp_dir) @@ -1136,3 +1146,4 @@ # assert it did not crash finally: sys.path[:] = old_sys_path + diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "3.2.5" /* PyPy version as a string */ -#define PYPY_VERSION "2.3.0-alpha0" +#define PYPY_VERSION "2.4.0-alpha0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/fcntl/interp_fcntl.py b/pypy/module/fcntl/interp_fcntl.py --- a/pypy/module/fcntl/interp_fcntl.py +++ b/pypy/module/fcntl/interp_fcntl.py @@ -62,8 +62,8 @@ fcntl_int = external('fcntl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) fcntl_str = external('fcntl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) fcntl_flock = external('fcntl', [rffi.INT, rffi.INT, _flock], rffi.INT) -ioctl_int = external('ioctl', [rffi.INT, rffi.INT, rffi.INT], rffi.INT) -ioctl_str = external('ioctl', [rffi.INT, rffi.INT, rffi.CCHARP], rffi.INT) +ioctl_int = external('ioctl', [rffi.INT, rffi.UINT, rffi.INT], rffi.INT) +ioctl_str = external('ioctl', [rffi.INT, rffi.UINT, rffi.CCHARP], rffi.INT) has_flock = cConfig.has_flock if has_flock: diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -11,7 +11,9 @@ os.unlink(i) class AppTestFcntl: - spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', 'select', 'rctime')) + spaceconfig = dict(usemodules=('fcntl', 'array', 'struct', 'termios', + 'select', 'rctime')) + def setup_class(cls): tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_')) cls.w_tmp = cls.space.wrap(tmpprefix) @@ -257,6 +259,31 @@ os.close(mfd) os.close(sfd) + def test_ioctl_signed_unsigned_code_param(self): + import fcntl + import os + import pty + import struct + import termios + + mfd, sfd = pty.openpty() + try: + if termios.TIOCSWINSZ < 0: + set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ + set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffff + else: + set_winsz_opcode_pos = termios.TIOCSWINSZ + set_winsz_opcode_maybe_neg, = struct.unpack("i", + struct.pack("I", termios.TIOCSWINSZ)) + + our_winsz = struct.pack("HHHH",80,25,0,0) + # test both with a positive and potentially negative ioctl code + new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz) + new_winsz = fcntl.ioctl(mfd, 
set_winsz_opcode_maybe_neg, our_winsz) + finally: + os.close(mfd) + os.close(sfd) + def test_large_flag(self): import sys if any(plat in sys.platform diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -2,7 +2,7 @@ Implementation of the interpreter-level default import logic. """ -import sys, os, stat, genericpath +import sys, os, stat from pypy.interpreter.module import Module from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -528,8 +528,7 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) - # os.path.isdir on win32 is not rpython when pywin32 installed - if genericpath.isdir(filepart) and case_ok(filepart): + if os.path.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) if modtype in (PY_SOURCE, PY_COMPILED): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -45,7 +45,7 @@ The variable struct.error is an exception raised on errors.""" - applevel_name = '_struct' + applevel_name = "_struct" interpleveldefs = { 'error': 'interp_struct.get_error(space)', @@ -55,9 +55,9 @@ 'pack_into': 'interp_struct.pack_into', 'unpack': 'interp_struct.unpack', 'unpack_from': 'interp_struct.unpack_from', - '_clearcache': 'interp_struct.clearcache', 'Struct': 'interp_struct.W_Struct', + '_clearcache': 'interp_struct.clearcache', } appleveldefs = { diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -21,11 +21,6 @@ return space.fromcache(Cache).error - at unwrap_spec(format=str) -def calcsize(space, format): - return space.wrap(_calcsize(space, format)) - - def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -38,7 +33,11 @@ 
@unwrap_spec(format=str) -def pack(space, format, args_w): +def calcsize(space, format): + return space.wrap(_calcsize(space, format)) + + +def _pack(space, format, args_w): if jit.isconstant(format): size = _calcsize(space, format) else: @@ -50,13 +49,18 @@ raise OperationError(space.w_OverflowError, space.wrap(e.msg)) except StructError, e: raise OperationError(get_error(space), space.wrap(e.msg)) - return space.wrapbytes(fmtiter.result.build()) + return fmtiter.result.build() + + + at unwrap_spec(format=str) +def pack(space, format, args_w): + return space.wrapbytes(_pack(space, format, args_w)) # XXX inefficient @unwrap_spec(format=str, offset=int) def pack_into(space, format, w_buffer, offset, args_w): - res = pack(space, format, args_w).bytes_w(space) + res = _pack(space, format, args_w) buf = space.writebuf_w(w_buffer) if offset < 0: offset += buf.getlength() @@ -140,3 +144,6 @@ pack_into=interp2app(W_Struct.descr_pack_into), unpack_from=interp2app(W_Struct.descr_unpack_from), ) + +def clearcache(space): + """No-op on PyPy""" diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 3, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 4, 0, "alpha", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -158,7 +158,15 @@ # Good default if there are no replacements. 
buf = StringBuilder(len("bytearray(b'')") + len(s)) - buf.append("bytearray(b'") + buf.append("bytearray(b") + quote = "'" + for c in s: + if c == '"': + quote = "'" + break + elif c == "'": + quote = '"' + buf.append(quote) for i in range(len(s)): c = s[i] @@ -180,7 +188,8 @@ else: buf.append(c) - buf.append("')") + buf.append(quote) + buf.append(")") return space.wrap(buf.build()) diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -39,22 +39,20 @@ # ignore whitespace after bracket while i < slen and s[i] == ' ': i += 1 + while slen > 0 and s[slen-1] == ' ': + slen -= 1 # extract first number realstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: + if s[i] in ('+', '-') and pc not in ('e', 'E') and i != realstart: break pc = s[i] i += 1 realstop = i - # ignore whitespace - while i < slen and s[i] == ' ': - i += 1 - # return appropriate strings is only one number is there if i >= slen: newstop = realstop - 1 @@ -76,20 +74,17 @@ # find sign for imaginary part if s[i] == '-' or s[i] == '+': imagsign = s[i] - if imagsign == ' ': + else: raise ValueError - i+=1 - # whitespace - while i < slen and s[i] == ' ': - i += 1 + i += 1 if i >= slen: raise ValueError imagstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E'): + if s[i] in ('+', '-') and pc not in ('e', 'E'): break pc = s[i] i += 1 @@ -97,14 +92,12 @@ imagstop = i - 1 if imagstop < 0: raise ValueError - if s[imagstop] not in ('j','J'): + if s[imagstop] not in ('j', 'J'): raise ValueError if imagstop < imagstart: raise ValueError - while i s_annotation - # that can be attached to booleans, exitswitches - knowntypedata = getattr(self.bindings.get(block.exitswitch), - "knowntypedata", {}) - # filter out those exceptions which cannot # occour for this specific, typed operation. 
if block.exitswitch == c_last_exception: @@ -480,93 +479,12 @@ exits.append(link) candidates = [c for c in candidates if c not in covered] + # mapping (exitcase, variable) -> s_annotation + # that can be attached to booleans, exitswitches + knowntypedata = getattr(self.bindings.get(block.exitswitch), + "knowntypedata", {}) for link in exits: - in_except_block = False - - last_exception_var = link.last_exception # may be None for non-exception link - last_exc_value_var = link.last_exc_value # may be None for non-exception link - - if isinstance(link.exitcase, (types.ClassType, type)) \ - and issubclass(link.exitcase, py.builtin.BaseException): - assert last_exception_var and last_exc_value_var - last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - last_exception_object.const = last_exception_var.value - last_exception_object.is_type_of = [last_exc_value_var] - - if isinstance(last_exception_var, Variable): - self.setbinding(last_exception_var, last_exception_object) - if isinstance(last_exc_value_var, Variable): - self.setbinding(last_exc_value_var, last_exc_value_object) - - last_exception_object = annmodel.SomeType() - if isinstance(last_exception_var, Constant): - last_exception_object.const = last_exception_var.value - #if link.exitcase is Exception: - # last_exc_value_object = annmodel.SomeObject() - #else: - last_exc_value_vars = [] - in_except_block = True - - ignore_link = False - cells = [] - renaming = {} - for a,v in zip(link.args,link.target.inputargs): - renaming.setdefault(a, []).append(v) - for a,v in zip(link.args,link.target.inputargs): - if a == last_exception_var: - assert in_except_block - cells.append(last_exception_object) - elif a == last_exc_value_var: - assert in_except_block - cells.append(last_exc_value_object) - last_exc_value_vars.append(v) - else: - cell = self.binding(a) - if (link.exitcase, a) in knowntypedata: - knownvarvalue 
= knowntypedata[(link.exitcase, a)] - cell = pair(cell, knownvarvalue).improve() - # ignore links that try to pass impossible values - if cell == annmodel.s_ImpossibleValue: - ignore_link = True - - if hasattr(cell,'is_type_of'): - renamed_is_type_of = [] - for v in cell.is_type_of: - new_vs = renaming.get(v,[]) - renamed_is_type_of += new_vs - assert cell.knowntype is type - newcell = annmodel.SomeType() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.is_type_of = renamed_is_type_of - - if hasattr(cell, 'knowntypedata'): - renamed_knowntypedata = {} - for (value, v), s in cell.knowntypedata.items(): - new_vs = renaming.get(v, []) - for new_v in new_vs: - renamed_knowntypedata[value, new_v] = s - assert isinstance(cell, annmodel.SomeBool) - newcell = annmodel.SomeBool() - if cell.is_constant(): - newcell.const = cell.const - cell = newcell - cell.set_knowntypedata(renamed_knowntypedata) - - cells.append(cell) - - if ignore_link: - continue - - if in_except_block: - last_exception_object.is_type_of = last_exc_value_vars - - self.links_followed[link] = True - self.addpendingblock(graph, link.target, cells) - + self.follow_link(graph, link, knowntypedata) if block in self.notify: # reflow from certain positions when this block is done for callback in self.notify[block]: @@ -575,39 +493,114 @@ else: callback() + def follow_link(self, graph, link, knowntypedata): + in_except_block = False + last_exception_var = link.last_exception # may be None for non-exception link + last_exc_value_var = link.last_exc_value # may be None for non-exception link + + if isinstance(link.exitcase, (types.ClassType, type)) \ + and issubclass(link.exitcase, py.builtin.BaseException): + assert last_exception_var and last_exc_value_var + last_exc_value_object = self.bookkeeper.valueoftype(link.exitcase) + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + 
last_exception_object.is_type_of = [last_exc_value_var] + + if isinstance(last_exception_var, Variable): + self.setbinding(last_exception_var, last_exception_object) + if isinstance(last_exc_value_var, Variable): + self.setbinding(last_exc_value_var, last_exc_value_object) + + last_exception_object = annmodel.SomeType() + if isinstance(last_exception_var, Constant): + last_exception_object.const = last_exception_var.value + #if link.exitcase is Exception: + # last_exc_value_object = annmodel.SomeObject() + #else: + last_exc_value_vars = [] + in_except_block = True + + ignore_link = False + cells = [] + renaming = {} + for a, v in zip(link.args, link.target.inputargs): + renaming.setdefault(a, []).append(v) + for a, v in zip(link.args, link.target.inputargs): + if a == last_exception_var: + assert in_except_block + cells.append(last_exception_object) + elif a == last_exc_value_var: + assert in_except_block + cells.append(last_exc_value_object) + last_exc_value_vars.append(v) + else: + cell = self.binding(a) + if (link.exitcase, a) in knowntypedata: + knownvarvalue = knowntypedata[(link.exitcase, a)] + cell = pair(cell, knownvarvalue).improve() + # ignore links that try to pass impossible values + if cell == annmodel.s_ImpossibleValue: + ignore_link = True + + if hasattr(cell,'is_type_of'): + renamed_is_type_of = [] From noreply at buildbot.pypy.org Thu May 22 04:32:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 22 May 2014 04:32:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: avoid the call to charpsize2str if size == 0 instead Message-ID: <20140522023223.A85A51C03C4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: py3k-memoryview Changeset: r71667:a73e8f64db11 Date: 2014-05-21 16:12 -0400 http://bitbucket.org/pypy/pypy/changeset/a73e8f64db11/ Log: avoid the call to charpsize2str if size == 0 instead diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ 
b/pypy/module/array/interp_array.py @@ -2,7 +2,7 @@ from rpython.rlib import jit from rpython.rlib.buffer import Buffer -from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import llstr @@ -653,17 +653,11 @@ array._charbuf_stop() def getslice(self, start, stop, step, size): + if size == 0: + return '' if step == 1: data = self.array._charbuf_start() try: - if not we_are_translated(): - # rffi.ptradd(NULL, ...) doesn't work untranslated. - # It returns nonsense translated, but its return value is - # unused if size == 0, which is the case if data == NULL - if self.array._buffer_as_unsigned() == 0: - assert size == 0 - return '' - return rffi.charpsize2str(rffi.ptradd(data, start), size) finally: self.array._charbuf_stop() From noreply at buildbot.pypy.org Thu May 22 08:12:04 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 08:12:04 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Use rffi.size_and_sign() here. Unsplit rffi.size_and_sign() to reduce diff with default. Message-ID: <20140522061204.9640C1C003C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71668:0365fc6d78ce Date: 2014-05-22 08:11 +0200 http://bitbucket.org/pypy/pypy/changeset/0365fc6d78ce/ Log: Use rffi.size_and_sign() here. Unsplit rffi.size_and_sign() to reduce diff with default. diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -909,20 +909,19 @@ # the ptr must point to an array. 
def size_and_sign(tp): - return sizeof(tp), is_unsigned(tp) - -def is_unsigned(tp): + size = sizeof(tp) try: - return not tp._type.SIGNED + unsigned = not tp._type.SIGNED except AttributeError: if not isinstance(tp, lltype.Primitive): - return False + unsigned = False elif tp in (lltype.Signed, FLOAT, DOUBLE, llmemory.Address): - return False + unsigned = False elif tp in (lltype.Char, lltype.UniChar, lltype.Bool): - return True + unsigned = True else: - raise AssertionError("is_unsigned(%r)" % (tp,)) + raise AssertionError("size_and_sign(%r)" % (tp,)) + return size, unsigned def sizeof(tp): """Similar to llmemory.sizeof() but tries hard to return a integer diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -350,8 +350,8 @@ for type in rffi.NUMBER_TYPES + [lltype.Char, lltype.UniChar]: if type not in PRIMITIVES: - PRIMITIVES[type] = IntegralType(rffi.sizeof(type) * 8, - rffi.is_unsigned(type)) + size_in_bytes, is_unsigned = rffi.size_and_sign(type) + PRIMITIVES[type] = IntegralType(size_in_bytes * 8, is_unsigned) LLVMSigned = PRIMITIVES[lltype.Signed] SIGNED_TYPE = LLVMSigned.repr_type() LLVMHalfWord = PRIMITIVES[llgroup.HALFWORD] From noreply at buildbot.pypy.org Thu May 22 08:16:45 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 08:16:45 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Fix test_driver.test_ctr(). Message-ID: <20140522061645.F2B891C02F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71669:24b03deafc9e Date: 2014-05-22 08:15 +0200 http://bitbucket.org/pypy/pypy/changeset/24b03deafc9e/ Log: Fix test_driver.test_ctr(). 
diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -43,7 +43,7 @@ 'backendopt_lltype'] expected = ['annotate', 'backendopt', 'llinterpret', 'rtype', 'source_c', - 'compile_c', 'pyjitpl'] + 'compile_c', 'source_llvm', 'compile_llvm', 'pyjitpl'] assert set(td.exposed) == set(expected) From noreply at buildbot.pypy.org Thu May 22 08:18:11 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 May 2014 08:18:11 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: missing file Message-ID: <20140522061811.D9E651C02F3@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5260:6aae6939f54d Date: 2014-05-22 08:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/6aae6939f54d/ Log: missing file diff --git a/talk/pycon-italy-2014/speed.png b/talk/pycon-italy-2014/speed.png new file mode 100644 index 0000000000000000000000000000000000000000..2e6069d526cf1c1f874afc6cde140313a642a6ef GIT binary patch [cut] From noreply at buildbot.pypy.org Thu May 22 08:29:09 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 08:29:09 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Fix test_interactive.test_simple_source_llvm. Message-ID: <20140522062909.DFEE11C350E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71670:198f0f083d39 Date: 2014-05-22 08:28 +0200 http://bitbucket.org/pypy/pypy/changeset/198f0f083d39/ Log: Fix test_interactive.test_simple_source_llvm. 
diff --git a/rpython/translator/test/test_interactive.py b/rpython/translator/test/test_interactive.py --- a/rpython/translator/test/test_interactive.py +++ b/rpython/translator/test/test_interactive.py @@ -55,15 +55,15 @@ py.test.raises(Exception, "t.source()") def test_simple_source_llvm(): - def f(x,y): - return x+y + def f(args): + return 0 - t = Translation(f, [int, int], backend='llvm') - t.source(gc='boehm') + t = Translation(f, None, backend='llvm') + t.source(gc='ref') assert 'source_llvm' in t.driver.done - t = Translation(f, [int, int]) - t.source_llvm() + t = Translation(f, None) + t.source_llvm(gc='ref') assert 'source_llvm' in t.driver.done def test_disable_logic(): From noreply at buildbot.pypy.org Thu May 22 08:42:10 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 22 May 2014 08:42:10 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Disable cpyext (and cppyy, which depends on it) when translating with the llvm backend. Message-ID: <20140522064210.A6AC21C003C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71671:2a72a631b974 Date: 2014-05-22 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/2a72a631b974/ Log: Disable cpyext (and cppyy, which depends on it) when translating with the llvm backend. 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -335,6 +335,12 @@ # may not be present in config.objspace.usemodules at all modules = [name for name in modules if name not in essential_modules] + # the llvm translation backend currently doesn't support cpyext + # cppyy depends in cpyext + if config.translation.backend == 'llvm': + modules.remove('cpyext') + modules.remove('cppyy') + config.objspace.usemodules.suggest(**dict.fromkeys(modules, True)) def enable_translationmodules(config): From noreply at buildbot.pypy.org Thu May 22 10:30:51 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 10:30:51 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: reset cards less often Message-ID: <20140522083051.133C71C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1226:292fcb0f01df Date: 2014-05-22 09:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/292fcb0f01df/ Log: reset cards less often diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -255,10 +255,12 @@ uintptr_t card_index = 1; uintptr_t last_card_index = get_card_index(size - 1); - dprintf(("mark cards of %p, size %lu with %d\n", obj, size, mark_value)); - OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS || write_locks[first_card_index] == 255); /* see gcpage.c */ + + dprintf(("mark cards of %p, size %lu with %d, all: %d\n", + obj, size, mark_value, mark_all)); + while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; @@ -269,10 +271,9 @@ } card_index++; } - OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS - || write_locks[first_card_index] == 255); /* see gcpage.c */ realobj->stm_flags &= ~GCFLAG_CARDS_SET; + #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") } @@ -314,7 +315,7 @@ -static inline void _collect_now(object_t *obj) +static inline void 
_collect_now(object_t *obj, bool was_definitely_young) { assert(!_is_young(obj)); @@ -329,6 +330,14 @@ obj->stm_flags |= GCFLAG_WRITE_BARRIER; if (obj->stm_flags & GCFLAG_HAS_CARDS) { + /* all objects that had WB cleared need to be fully synchronised + on commit, so we have to mark all their cards */ + if (was_definitely_young) { + /* we don't mark cards on young objects */ + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + return; + } + if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { /* we do not need the old cards for overflow objects */ _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), @@ -339,6 +348,7 @@ } } } if (obj->stm_flags & GCFLAG_CARDS_SET) { + assert(!was_definitely_young); _trace_card_object(obj); } } @@ -346,26 +356,29 @@ static void collect_cardrefs_to_nursery(void) { + dprintf(("collect_cardrefs_to_nursery\n")); struct list_s *lst = STM_PSEGMENT->old_objects_with_cards; while (!list_is_empty(lst)) { object_t *obj = (object_t*)list_pop_item(lst); assert(obj->stm_flags & GCFLAG_CARDS_SET); - _collect_now(obj); + _collect_now(obj, false); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); } } static void collect_oldrefs_to_nursery(void) { + dprintf(("collect_oldrefs_to_nursery\n")); struct list_s *lst = STM_PSEGMENT->objects_pointing_to_nursery; while (!list_is_empty(lst)) { uintptr_t obj_sync_now = list_pop_item(lst); object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); - _collect_now(obj); + bool was_definitely_young = (obj_sync_now & FLAG_SYNC_LARGE); + _collect_now(obj, was_definitely_young); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); if (obj_sync_now & FLAG_SYNC_LARGE) { @@ -379,9 +392,6 @@ acquire_privatization_lock(); synchronize_object_now(obj); release_privatization_lock(); - if (obj->stm_flags & GCFLAG_HAS_CARDS) { - _reset_object_cards(pseg, obj, CARD_CLEAR, false); /* was young */ - } } else { LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } @@ -395,13 +405,15 @@ static void collect_modified_old_objects(void) { + 
dprintf(("collect_modified_old_objects\n")); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, - _collect_now(item)); + _collect_now(item, false)); } static void collect_roots_from_markers(uintptr_t num_old) { + dprintf(("collect_roots_from_markers\n")); /* visit the marker objects */ struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); @@ -424,6 +436,7 @@ #pragma push_macro("STM_SEGMENT") #undef STM_PSEGMENT #undef STM_SEGMENT + dprintf(("throw_away_nursery\n")); /* reset the nursery by zeroing it */ size_t nursery_used; char *realnursery; From noreply at buildbot.pypy.org Thu May 22 10:30:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 10:30:52 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: move clearing of modified_old_objects to where they get pushed or reset Message-ID: <20140522083052.38E971C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1227:5e3ed01df505 Date: 2014-05-22 09:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/5e3ed01df505/ Log: move clearing of modified_old_objects to where they get pushed or reset diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -424,7 +424,7 @@ } } -static void synchronize_object_now(object_t *obj) +static void synchronize_object_now(object_t *obj, bool ignore_cards) { /* Copy around the version of 'obj' that lives in our own segment. 
It is first copied into the shared pages, and then into other @@ -502,6 +502,8 @@ start = (start + 4096) & ~4095; } } + + _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), obj); } static void push_overflow_objects_from_privatized_pages(void) @@ -511,7 +513,7 @@ acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_object_now(item)); + synchronize_object_now(item, true /*ignore_cards*/)); release_privatization_lock(); } @@ -533,9 +535,13 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); + if (item->stm_flags & GCFLAG_HAS_CARDS) + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + item, CARD_CLEAR, false); + /* copy the object to the shared page, and to the other private pages as needed */ - synchronize_object_now(item); + synchronize_object_now(item, false); /* don't ignore_cards */ })); release_privatization_lock(); @@ -605,6 +611,7 @@ /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); + _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { @@ -664,6 +671,9 @@ ssize_t size = stmcb_size_rounded_up((struct object_s *)src); memcpy(dst, src, size); + if (item->stm_flags & GCFLAG_HAS_CARDS) + _reset_object_cards(pseg, item, CARD_CLEAR, false); + /* objects in 'modified_old_objects' usually have the WRITE_BARRIER flag, unless they have been modified recently. Ignore the old flag; after copying from the @@ -725,6 +735,7 @@ /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); + _verify_cards_cleared_in_all_lists(pseg); /* reset the tl->shadowstack and thread_local_obj to their original value before the transaction start */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -288,7 +288,7 @@ } static void copy_object_to_shared(object_t *obj, int source_segment_num); -static void synchronize_object_now(object_t *obj); +static void synchronize_object_now(object_t *obj, bool ignore_cards); static inline void acquire_privatization_lock(void) { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -469,8 +469,8 @@ OPT_ASSERT(!(realobj->stm_flags & GCFLAG_CARDS_SET)); realobj->stm_flags |= GCFLAG_WRITE_BARRIER; - /* XXX: this will be necessary when only synchronising cards */ + /* logic corresponds to _collect_now() in nursery.c */ if (realobj->stm_flags & GCFLAG_HAS_CARDS) { /* We called a normal WB on these objs. If we wrote a value to some place in them, we need to @@ -493,7 +493,7 @@ OPT_ASSERT(realobj->stm_flags & GCFLAG_CARDS_SET); OPT_ASSERT(realobj->stm_flags & GCFLAG_WRITE_BARRIER); - /* XXX: this will be necessary when only synchronising cards */ + /* logic corresponds to _trace_card_object() in nursery.c */ uint8_t mark_value = IS_OVERFLOW_OBJ(pseg, realobj) ? CARD_CLEAR : CARD_MARKED_OLD; _reset_object_cards(pseg, item, mark_value, false); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -390,7 +390,7 @@ struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num); if (STM_PSEGMENT->minor_collect_will_commit_now) { acquire_privatization_lock(); - synchronize_object_now(obj); + synchronize_object_now(obj, true); /* ignore cards! 
*/ release_privatization_lock(); } else { LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); @@ -480,21 +480,11 @@ tree_clear(pseg->nursery_objects_shadows); - /* nearly all objs in old_objects_with_cards are also in modified_old_objects, - so we don't need to go through both lists: */ - LIST_FOREACH_R(pseg->modified_old_objects, object_t * /*item*/, - { - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); - - if (realobj->stm_flags & GCFLAG_HAS_CARDS) { - /* clear all possibly used cards in this transaction */ - _reset_object_cards(pseg, item, CARD_CLEAR, false); - } - }); - /* overflow objects with cards are not in modified_old_objects */ + /* modified_old_objects' cards get cleared in push_modified_to_other_segments + or reset_modified_from_other_segments. Objs in old_objs_with_cards but not + in modified_old_objs are overflow objects and handled here: */ if (pseg->large_overflow_objects != NULL) { - /* some overflow objects may have cards, clear them too */ + /* some overflow objects may have cards when aborting, clear them too */ LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, { struct object_s *realobj = (struct object_s *) @@ -508,8 +498,6 @@ }); } - _verify_cards_cleared_in_all_lists(pseg); - return nursery_used; #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") From noreply at buildbot.pypy.org Thu May 22 10:30:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 10:30:53 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: first unoptimized pushing of only the marked cards Message-ID: <20140522083053.4DB681C02F3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1228:e5373deaa3d5 Date: 2014-05-22 10:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/e5373deaa3d5/ Log: first unoptimized pushing of only the marked cards diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -424,6 +424,121 @@ 
} } +static void _page_wise_synchronize_object_now(object_t *obj) +{ + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + long i, myself = STM_SEGMENT->segment_num; + + for (; first_page <= last_page; first_page++) { + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + /* First copy the object into the shared page, if needed */ + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (is_private_page(myself, first_page)) { + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + src = REAL_ADDRESS(stm_object_pages, start); + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + fragment of object from the shared page to this private + page. 
*/ + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(!memcmp(dst, src, copy_size)); /* same page */ + } + } + + start = (start + 4096) & ~4095; + } +} + +static void _card_wise_synchronize_object_now(object_t *obj) +{ + assert(obj->stm_flags & GCFLAG_HAS_CARDS); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)); + + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t obj_size = stmcb_size_rounded_up(realobj); + assert(obj_size >= 32); + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_card_index(obj_size - 1); + long i, myself = STM_SEGMENT->segment_num; + + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + + if (write_locks[card_lock_idx] == CARD_MARKED_OLD) { + write_locks[card_lock_idx] = CARD_CLEAR; + + uintptr_t card_byte_offset = get_card_byte_offset(card_index); + uintptr_t start = (uintptr_t)obj + card_byte_offset; + uintptr_t copy_size = CARD_SIZE; + + if (start - (uintptr_t)obj + copy_size > obj_size) { + /* don't copy over the object's bounds */ + copy_size = obj_size - (start - (uintptr_t)obj); + } + + /* copy to shared segment: */ + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + memcpy(dst, src, copy_size); + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + /* src = REAL_ADDRESS(stm_object_pages, start); */ + dst = REAL_ADDRESS(get_segment_base(i), start); + memcpy(dst, src, copy_size); + } + } + + card_index++; + } +} + + static void synchronize_object_now(object_t *obj, bool ignore_cards) { /* Copy around the version of 'obj' that lives in our own segment. 
@@ -436,71 +551,13 @@ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); assert(STM_PSEGMENT->privatization_lock == 1); - uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; - if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + assert(!(obj->stm_flags & GCFLAG_HAS_CARDS)); abort();//XXX WRITE THE FAST CASE - } - else { - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - long i, myself = STM_SEGMENT->segment_num; - - for (; first_page <= last_page; first_page++) { - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - (start & 4095); - } - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - - /* First copy the object into the shared page, if needed */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - if (is_private_page(myself, first_page)) { - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ - } - - for (i = 1; i <= NB_SEGMENTS; i++) { - if (i == myself) - continue; - - src = REAL_ADDRESS(stm_object_pages, start); - dst = REAL_ADDRESS(get_segment_base(i), start); - if (is_private_page(i, first_page)) { - /* The page is a private page. We need to diffuse this - fragment of object from the shared page to this private - page. 
*/ - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(!memcmp(dst, src, copy_size)); /* same page */ - } - } - - start = (start + 4096) & ~4095; - } + } else if (ignore_cards || !(obj->stm_flags & GCFLAG_HAS_CARDS)) { + _page_wise_synchronize_object_now(obj); + } else { + _card_wise_synchronize_object_now(obj); } _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), obj); @@ -535,10 +592,6 @@ minor_collection() */ assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); - if (item->stm_flags & GCFLAG_HAS_CARDS) - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - item, CARD_CLEAR, false); - /* copy the object to the shared page, and to the other private pages as needed */ synchronize_object_now(item, false); /* don't ignore_cards */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -237,9 +237,16 @@ == pseg->overflow_number) static inline uintptr_t get_card_index(uintptr_t byte_offset) { - assert(_STM_CARD_SIZE == 32); + assert(CARD_SIZE == 32); return (byte_offset >> 5) + 1; } + +static inline uintptr_t get_card_byte_offset(uintptr_t card_index) { + assert(CARD_SIZE == 32); + return (card_index - 1) << 5; +} + + static inline uintptr_t get_write_lock_idx(uintptr_t obj) { uintptr_t res = (obj >> 4) - WRITELOCK_START; assert(res < sizeof(write_locks)); From noreply at buildbot.pypy.org Thu May 22 11:27:32 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 11:27:32 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: try to detect when an object is fully marked Message-ID: <20140522092732.793521C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1229:49532f4219aa Date: 2014-05-22 11:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/49532f4219aa/ Log: try to detect when an object is fully marked diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -469,7 
+469,7 @@ if (i == myself) continue; - src = REAL_ADDRESS(stm_object_pages, start); + /* src = REAL_ADDRESS(stm_object_pages, start); */ dst = REAL_ADDRESS(get_segment_base(i), start); if (is_private_page(i, first_page)) { /* The page is a private page. We need to diffuse this @@ -504,6 +504,21 @@ uintptr_t last_card_index = get_card_index(obj_size - 1); long i, myself = STM_SEGMENT->segment_num; + /* simple heuristic to check if probably the whole object is + marked anyway so we can do page-wise synchronize */ + if (write_locks[first_card_index + 1] == CARD_MARKED_OLD + && write_locks[first_card_index + last_card_index] == CARD_MARKED_OLD + && write_locks[first_card_index + (last_card_index >> 1) + 1] == CARD_MARKED_OLD) { + + dprintf(("card_wise_sync assumes %p,size:%lu is fully marked\n", obj, obj_size)); + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_CLEAR, false); + _page_wise_synchronize_object_now(obj); + return; + } + + dprintf(("card_wise_sync syncs %p,size:%lu card-wise\n", obj, obj_size)); + while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; @@ -532,6 +547,8 @@ dst = REAL_ADDRESS(get_segment_base(i), start); memcpy(dst, src, copy_size); } + } else { + assert(write_locks[card_lock_idx] != CARD_MARKED); /* always only MARKED_OLD */ } card_index++; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -148,7 +148,7 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_GCFLAG_HAS_CARDS 0x08 #define _STM_GCFLAG_CARDS_SET 0x10 -#define _STM_CARD_SIZE 32 /* 16 may be safe too */ +#define _STM_CARD_SIZE 32 /* >= 32 */ #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -368,7 +368,7 @@ #"SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) - with_cards = int(size) >= 32 + with_cards = int(size) >= 32 and 
global_state.rnd.randrange(1, 100) > 10 r = global_state.get_new_root_name(False, size, with_cards) thread_state.push_roots(ex) @@ -382,7 +382,7 @@ def op_allocate_ref(ex, global_state, thread_state): num = str(global_state.rnd.randrange(1, 100)) - with_cards = int(num) >= 4 + with_cards = int(num) >= 4 and global_state.rnd.randrange(1, 100) > 10 r = global_state.get_new_root_name(True, num, with_cards) thread_state.push_roots(ex) ex.do('%s = stm_allocate_refs(%s, %s)' % (r, num, bool(with_cards))) @@ -417,7 +417,7 @@ r = thread_state.get_random_root() trs = thread_state.transaction_state is_ref = global_state.has_ref_type(r) - has_cards = global_state.has_cards(r) + has_cards = global_state.has_cards(r) and global_state.rnd.randrange(1, 100) > 5 # # check for possible write-write conflict: was_written = False From noreply at buildbot.pypy.org Thu May 22 11:37:12 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 11:37:12 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: first small optimisation to only copy if there are private pages involved Message-ID: <20140522093712.B619E1C3396@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1230:ddb7a7bc2c89 Date: 2014-05-22 11:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/ddb7a7bc2c89/ Log: first small optimisation to only copy if there are private pages involved diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -489,6 +489,17 @@ } } +static inline bool _has_private_page_in_range( + long seg_num, uintptr_t start, uintptr_t size) +{ + uintptr_t first_page = start / 4096UL; + uintptr_t last_page = (start + size) / 4096UL; + for (; first_page <= last_page; first_page++) + if (is_private_page(seg_num, first_page)) + return true; + return false; +} + static void _card_wise_synchronize_object_now(object_t *obj) { assert(obj->stm_flags & GCFLAG_HAS_CARDS); @@ -534,6 +545,9 @@ copy_size = obj_size - (start - (uintptr_t)obj); } + /* 
since we have marked cards, at least one page here must be private */ + assert(_has_private_page_in_range(myself, start, copy_size)); + /* copy to shared segment: */ char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); char *dst = REAL_ADDRESS(stm_object_pages, start); @@ -542,7 +556,8 @@ for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; - + if (!_has_private_page_in_range(i, start, copy_size)) + continue; /* src = REAL_ADDRESS(stm_object_pages, start); */ dst = REAL_ADDRESS(get_segment_base(i), start); memcpy(dst, src, copy_size); From noreply at buildbot.pypy.org Thu May 22 12:04:29 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 22 May 2014 12:04:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <20140522100429.7489F1C003C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5261:bfdebbabc951 Date: 2014-05-22 11:59 +0200 http://bitbucket.org/pypy/extradoc/changeset/bfdebbabc951/ Log: more slides diff --git a/talk/pycon-italy-2014/Makefile b/talk/pycon-italy-2014/Makefile --- a/talk/pycon-italy-2014/Makefile +++ b/talk/pycon-italy-2014/Makefile @@ -5,7 +5,8 @@ # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 talk.pdf: talk.rst author.latex title.latex stylesheet.latex - /home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit pdflatex talk.latex || exit diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- 
a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -27,6 +27,11 @@ - PyPy is healthy and alive +|pause| + +- WARNING: This talk is boring + + * "it just works" What is PyPy? -------------- @@ -63,7 +68,7 @@ - PyPy 2.1 (July 2013) - * stable ARM (thanks to Raspberry Pi foundation) + * stable ARM * py3k (3.2.3), numpy, general improvements, bugfixes @@ -77,6 +82,7 @@ * more numpy, numpy C API + PyPy: past two years (2) ------------------------- @@ -98,15 +104,35 @@ - Topaz: implementing Ruby - * most of the language implemented, "definitely faster than MRI" + * most of the language implemented + + * "definitely faster than MRI" * https://github.com/topazproject/topaz - HippyVM: implementing PHP + * ~7x faster than standard PHP + * http://hippyvm.com/ + +Fundraising campaign +--------------------- + +- py3k: 50'852 $ of 105'000 $ (48.4%) + +- numpy: 48'121 $ of 60'000 $ (80.2%) + +- STM, 1st call: 25'000 $ + +- STM, 2nd call: 2'097 $ of 80'000 $ (2.6%) + + * more on STM later + +- thank to all donors! 
+ Current status --------------- @@ -120,13 +146,13 @@ * Native PyPy C API for embedding + * cppyy for C++ + - Lots of CFFI modules around: * pygame_cffi, psycopg2_cffi, lxml -- numpy: in-progress, tons of code works out of the box - - * no scipy yet :-/ +- numpy: in-progress (more later) Speed: 6.3x faster than CPython @@ -136,21 +162,88 @@ :scale: 47% -Current status ---------------- - -- ARM - -- CFFI - -- numpy - -- py3k - - -xxx +ARM ---- -- future: STM +- Official support since PyPy 2.1 -- Q&A +- "it just works" + +- ~7.5x faster than CPython on ARM + +- thanks to Raspberry-Pi foundation + +- distributed as part of Raspbian OS + + +numpy +----- + +- as usual, in-progress + +- ~80% of numpy implemented + + * http://buildbot.pypy.org/numpy-status/latest.html + +- just try it + +- no scipy :-/ + + +py3k +---- + +- 3.2: stable + +- 3.3: branch started, in-progress + +- some missing optimizations + + * getting better + + +CFFI +----- + +- Python <-> C interfacing done right + + * existing shared libraries + + * custom C code + +- Inspired by LuaJIT's FFI + +- Alternative to C-API, ctypes, Cython, etc. + +- Fast on CPython, super-fast on PyPy + + +cppyy +------ + +- Interface to C++ + +- Based on reflection, no need to write wrappers + +- PyPy-only, similar to PyCintex for CPython + +- Main use case: ROOT + + * http://root.cern.ch + + * "a set of OO frameworks with all the functionality needed to handle and + analyze large amounts of data in a very efficient way" + +- 3x faster than CPython + + +STM +--- + +TODO + +Q&A +--- + +Any question? 
+ From noreply at buildbot.pypy.org Thu May 22 12:07:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 May 2014 12:07:22 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Comment Message-ID: <20140522100722.9424C1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5262:833c4c1666b9 Date: 2014-05-22 12:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/833c4c1666b9/ Log: Comment diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -79,6 +79,7 @@ \newcommand\remi[1]{\mynote{Remi}{#1}} \newcommand\cfbolz[1]{\mynote{cfbolz}{#1}} +\newcommand\arigo[1]{\mynote{arigo}{#1}} @@ -173,7 +174,9 @@ \begin{itemize}[noitemsep] \item We introduce a new software transactional memory (STM) system that performs well even on low numbers of CPUs. It uses a novel - combination of hardware features and garbage collector (GC) + combination of hardware features\arigo{"OS-level feature" maybe. + "Hardware feature" implies it only works on custom chips} + and garbage collector (GC) integration in order to keep the overhead of STM very low. \item This new STM system is used to replace the GIL in Python and is then evaluated extensively. From noreply at buildbot.pypy.org Thu May 22 13:42:45 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Thu, 22 May 2014 13:42:45 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Added and implemented the wait primitive. Therefore we added a StmProcessShadow Message-ID: <20140522114245.EA4031C1191@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r835:3dab35cc8af7 Date: 2014-05-22 12:40 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/3dab35cc8af7/ Log: Added and implemented the wait primitive. 
Therefore we added a StmProcessShadow diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -440,4 +440,4 @@ at: 14 at: 13 put: 1; at: 12 at: 14 put: 1. ((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1; at: 20 at: 20 put: 1; at: 20 at: 21 put: 1; at: 20 at: 22 put: 1. ]. - ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! 
\ No newline at end of file + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! 
join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! \ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index f61fe405b1655959263ba4b81b0437800f94fb85..afbdfb32577e6a5f8388a264d6ff6ee8e350c64b GIT binary patch [cut] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -2,7 +2,7 @@ import os import time -from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound +from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound, StmProcessShadow from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter @@ -105,10 +105,12 @@ print "New thread reporting" interp = bootstrapper.interp w_frame = bootstrapper.w_frame + w_stm_process = bootstrapper.w_stm_process + assert isinstance(interp, Interpreter) - #assert isinstance(w_frame, model.W_PointersObject) - #assert isinstance(w_stm_process, model.W_PointersObject) + assert isinstance(w_frame, model.W_PointersObject) + assert isinstance(w_stm_process, model.W_PointersObject) bootstrapper.num_threads += 1 print "Me is started", bootstrapper.num_threads bootstrapper.release() @@ -119,6 +121,9 @@ # interp.interpret_with_w_frame(w_frame, may_context_switch=False) time.sleep(2.5) + s_stm_process = w_stm_process.as_special_get_shadow(interp.space, StmProcessShadow) + s_stm_process.lock.release() + # Signal waiting processes #wrapper.StmProcessWrapper(interp.space, w_stm_process).signal('thread') @@ -182,7 +187,7 @@ new_interp.interrupt_check_counter = self.interrupt_check_counter new_interp.trace_proxy = self.trace_proxy - bootstrapper.acquire(new_interp, None, None) + bootstrapper.acquire(new_interp, w_frame, w_stm_process) rthread.start_new_thread(bootstrapper.bootstrap, ()) def 
interpret_with_w_frame(self, w_frame): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1470,8 +1470,24 @@ print "STM_FORK primitive called" - wrapper.StmProcessWrapper(interp.space, w_rcvr).fork(s_frame.w_self()) - rstm.should_break_transaction() + if not isinstance(w_rcvr, model.W_PointersObject): + raise PrimitiveFailedError("Fork primitive was not called on an StmProcess") + process_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.StmProcessShadow) + process_shadow.fork(s_frame.w_self()) + + at expose_primitive(STM_WAIT, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_WAIT primitive called" + + if not isinstance(w_rcvr, model.W_PointersObject): + raise PrimitiveFailedError("Join primitive was not called on an StmProcess") + process_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.StmProcessShadow) + process_shadow.join(True) + + print "STM Rendezvous" + print "Should break: %s" % rstm.should_break_transaction() # ___________________________________________________________________________ # BlockClosure Primitives diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -2,7 +2,7 @@ from spyvm import model, constants, error, wrapper, version from spyvm.version import elidable_for_version, constant_for_version from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, jit +from rpython.rlib import rarithmetic, jit, rthread from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.debug import make_sure_not_resized @@ -1122,3 +1122,21 @@ self.dependent = dependent def update(self): pass + + +class StmProcessShadow(AbstractShadow): + + def __init__(self, space, w_self): + AbstractShadow.__init__(self, space, w_self) + self.lock = rthread.allocate_lock() + + def fork(self, w_current_frame): + from spyvm.interpreter import 
STMForkException + self.lock.acquire(True) + wrapper.StmProcessWrapper(self.space, self.w_self()).fork(w_current_frame) + + def join(self, blocking): + lock_result = self.lock.acquire(blocking) + if lock_result: + self.lock.release() + return lock_result \ No newline at end of file diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -145,8 +145,6 @@ assert isinstance(w_frame, model.W_PointersObject) print "Breaking interpreter loop for forking" - # we need to pass control to the interpreter loop here - # self.store_lock(1) Needed for join call raise STMForkException(w_frame, self._w_self) class LinkedListWrapper(Wrapper): From noreply at buildbot.pypy.org Thu May 22 14:08:18 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Thu, 22 May 2014 14:08:18 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: added actual interpretation to forked stm process. changed stmprocess class in the image Message-ID: <20140522120818.942731D2D48@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r836:9e020878c717 Date: 2014-05-22 13:07 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9e020878c717/ Log: added actual interpretation to forked stm process. changed stmprocess class in the image diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -440,4 +440,4 @@ at: 14 at: 13 put: 1; at: 12 at: 14 put: 1. ((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1; at: 20 at: 20 put: 1; at: 20 at: 21 put: 1; at: 20 at: 22 put: 1. ]. - ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! 
FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! \ No newline at end of file + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 
2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 
11:50:32 am} Squeak4.5-12568.image priorSource: 88165! ----STARTUP----{22 May 2014 . 1:53:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:58'! newStmProcess ^ StmProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33568777! parallelFork ^ (self newStmProcess) fork; yourself! ! BlockClosure removeSelector: #newStmProcess! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33643726! parallelFork ^ (self newSTMProcess) fork; yourself! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33556280! primWait SPyVM print: ' Failed to wait for process!! ' self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33644052! primWait SPyVM print: ' Failed to wait for process!! '. self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 42636506! fork Transcript show: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! STMProcess removeSelector: #initialize! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:02' prior: 33643008! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new fork. p join! ! Smalltalk removeClassNamed: #StmProcess! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:03' prior: 33644952! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! ----SNAPSHOT----{22 May 2014 . 2:03:31 pm} Squeak4.5-12568.image priorSource: 88855! ----QUIT----{22 May 2014 . 2:06:55 pm} Squeak4.5-12568.image priorSource: 91148! \ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index afbdfb32577e6a5f8388a264d6ff6ee8e350c64b..aa8ba78091bb620da5e2b213eb8a616801bb8b73 GIT binary patch [cut] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -115,19 +115,12 @@ print "Me is started", bootstrapper.num_threads bootstrapper.release() - # ...aaaaand go! - # wrapper.StmProcessWrapper(interp.space, w_stm_process).store_lock(1) - - # interp.interpret_with_w_frame(w_frame, may_context_switch=False) - time.sleep(2.5) - - s_stm_process = w_stm_process.as_special_get_shadow(interp.space, StmProcessShadow) - s_stm_process.lock.release() - - # Signal waiting processes - #wrapper.StmProcessWrapper(interp.space, w_stm_process).signal('thread') + interp.interpret_with_w_frame(w_frame) #, may_context_switch=False # cleanup + s_stm_process = w_stm_process.as_special_get_shadow(interp.space, StmProcessShadow) + s_stm_process.signal() + bootstrapper.num_threads -= 1 bootstrap = staticmethod(bootstrap) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1139,4 +1139,7 @@ lock_result = self.lock.acquire(blocking) if lock_result: self.lock.release() - return lock_result \ No newline at end of file + return lock_result + + def signal(self): + self.lock.release() \ No newline at end of file diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -116,9 +116,6 @@ class StmProcessWrapper(ProcessWrapper): - # Mis-using priority as lock, we don't need prios :P - 
lock, store_lock = make_int_getter_setter(2) - def put_to_sleep(self): # Must not queue pass From noreply at buildbot.pypy.org Thu May 22 15:30:53 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 22 May 2014 15:30:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Remove assertion that doesn't hold for threaded Interpreters Message-ID: <20140522133053.5A64B1C003C@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r837:6eed9cb676ed Date: 2014-05-22 15:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6eed9cb676ed/ Log: Remove assertion that doesn't hold for threaded Interpreters diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -440,4 +440,4 @@ at: 14 at: 13 put: 1; at: 12 at: 14 put: 1. ((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1; at: 20 at: 20 put: 1; at: 20 at: 21 put: 1; at: 20 at: 22 put: 1. ]. - ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! ----STARTUP----{22 May 2014 . 1:53:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:58'! newStmProcess ^ StmProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33568777! parallelFork ^ (self newStmProcess) fork; yourself! ! BlockClosure removeSelector: #newStmProcess! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33643726! parallelFork ^ (self newSTMProcess) fork; yourself! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33556280! primWait SPyVM print: ' Failed to wait for process!! 
' self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33644052! primWait SPyVM print: ' Failed to wait for process!! '. self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 42636506! fork Transcript show: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! STMProcess removeSelector: #initialize! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:02' prior: 33643008! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new fork. p join! ! Smalltalk removeClassNamed: #StmProcess! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:03' prior: 33644952! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! ----SNAPSHOT----{22 May 2014 . 2:03:31 pm} Squeak4.5-12568.image priorSource: 88855! ----QUIT----{22 May 2014 . 2:06:55 pm} Squeak4.5-12568.image priorSource: 91148! \ No newline at end of file + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! ----STARTUP----{22 May 2014 . 1:53:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:58'! 
newStmProcess ^ StmProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33568777! parallelFork ^ (self newStmProcess) fork; yourself! ! BlockClosure removeSelector: #newStmProcess! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33643726! parallelFork ^ (self newSTMProcess) fork; yourself! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33556280! primWait SPyVM print: ' Failed to wait for process!! ' self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33644052! primWait SPyVM print: ' Failed to wait for process!! '. self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 42636506! fork Transcript show: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! STMProcess removeSelector: #initialize! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:02' prior: 33643008! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new fork. p join! ! Smalltalk removeClassNamed: #StmProcess! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:03' prior: 33644952! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! ----SNAPSHOT----{22 May 2014 . 
2:03:31 pm} Squeak4.5-12568.image priorSource: 88855! ----QUIT----{22 May 2014 . 2:06:55 pm} Squeak4.5-12568.image priorSource: 91148! ----STARTUP----{22 May 2014 . 2:12:45 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 2:13:22 pm} Squeak4.5-12568.image priorSource: 91234! ----STARTUP----{22 May 2014 . 2:41:04 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:41'! paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:42'! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p join! ! Integer removeSelector: #stmBenchmarkTest! ----QUIT----{22 May 2014 . 2:42:38 pm} Squeak4.5-12568.image priorSource: 91430! ----STARTUP----{22 May 2014 . 2:45:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646317! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646745! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait.! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646127! paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----QUIT----{22 May 2014 . 2:46:59 pm} Squeak4.5-12568.image priorSource: 92032! ----STARTUP----{22 May 2014 . 2:47:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48'! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! 
Integer removeSelector: #paralellForkTest! ----QUIT----{22 May 2014 . 2:48:13 pm} Squeak4.5-12568.image priorSource: 92811! \ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index aa8ba78091bb620da5e2b213eb8a616801bb8b73..9a8fd93e37a9bd12ba8b11507164f7afb300e822 GIT binary patch [cut] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -201,7 +201,7 @@ self._loop = True s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.remaining_stack_depth == self.max_stack_depth + #assert self.remaining_stack_depth == self.max_stack_depth # Need to save s_sender, c_loop will nil this on return s_sender = s_new_context.s_sender() try: From noreply at buildbot.pypy.org Thu May 22 15:42:24 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 22 May 2014 15:42:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: added -m gameOfLife switch Message-ID: <20140522134224.501871C3396@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r838:a13de6a4f41e Date: 2014-05-22 15:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a13de6a4f41e/ Log: added -m gameOfLife switch diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -440,4 +440,322 @@ at: 14 at: 13 put: 1; at: 12 at: 14 put: 1. ((numberOfRows > 16) and: (numberOfColumns > 16)) ifTrue: [ newField at: 20 at: 3 put: 1; at: 20 at: 4 put: 1; at: 21 at: 2 put: 1; at: 21 at: 5 put: 1; at: 22 at: 3 put: 1; at: 22 at: 4 put: 1; at: 20 at: 20 put: 1; at: 20 at: 21 put: 1; at: 20 at: 22 put: 1. ]. - ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! 
FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! ----STARTUP----{22 May 2014 . 1:53:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 
!BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:58'! newStmProcess ^ StmProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33568777! parallelFork ^ (self newStmProcess) fork; yourself! ! BlockClosure removeSelector: #newStmProcess! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33643726! parallelFork ^ (self newSTMProcess) fork; yourself! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33556280! primWait SPyVM print: ' Failed to wait for process!! ' self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33644052! primWait SPyVM print: ' Failed to wait for process!! '. self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 42636506! fork Transcript show: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! STMProcess removeSelector: #initialize! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:02' prior: 33643008! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new fork. p join! ! Smalltalk removeClassNamed: #StmProcess! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:03' prior: 33644952! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! 
! ----SNAPSHOT----{22 May 2014 . 2:03:31 pm} Squeak4.5-12568.image priorSource: 88855! ----QUIT----{22 May 2014 . 2:06:55 pm} Squeak4.5-12568.image priorSource: 91148! ----STARTUP----{22 May 2014 . 2:12:45 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 2:13:22 pm} Squeak4.5-12568.image priorSource: 91234! ----STARTUP----{22 May 2014 . 2:41:04 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:41'! paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:42'! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p join! ! Integer removeSelector: #stmBenchmarkTest! ----QUIT----{22 May 2014 . 2:42:38 pm} Squeak4.5-12568.image priorSource: 91430! ----STARTUP----{22 May 2014 . 2:45:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646317! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646745! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait.! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646127! paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----QUIT----{22 May 2014 . 2:46:59 pm} Squeak4.5-12568.image priorSource: 92032! ----STARTUP----{22 May 2014 . 2:47:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48'! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. 
] parallelFork. p wait! ! Integer removeSelector: #paralellForkTest! ----QUIT----{22 May 2014 . 2:48:13 pm} Squeak4.5-12568.image priorSource: 92811! \ No newline at end of file + ^ newField! ! ----SNAPSHOT----{21 January 2014 . 2:05:40 pm} Squeak4.5-12568.image priorSource: 84405! ----STARTUP----{17 May 2014 . 11:47:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! FileStream stdout nextPutAll: 'release.st'; cr; flush.! ----SNAPSHOT----{17 May 2014 . 11:48:30 pm} Squeak4.5-12568.image priorSource: 86737! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:49'! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. "^ StmProcess new fork"! ! ----QUIT----{17 May 2014 . 11:49:16 pm} Squeak4.5-12568.image priorSource: 86999! ----STARTUP----{17 May 2014 . 11:50:46 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Process subclass: #StmProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/17/2014 23:53' prior: 33641587! stmBenchmarkTest FileStream stdout nextPutAll: 'starting stm process.'. ^ StmProcess new fork.! ! !StmProcess methodsFor: 'nil' stamp: 'hh 5/17/2014 23:54'! fork FileStream stdout nextPutAll: 'Primitive stmFork failed'. self primitiveFailed! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'hh 5/17/2014 23:55' prior: 33642264! fork self primitiveFailed! ! ----SNAPSHOT----{17 May 2014 . 11:55:14 pm} Squeak4.5-12568.image priorSource: 87255! ----QUIT----{17 May 2014 . 11:55:30 pm} Squeak4.5-12568.image priorSource: 88078! ----STARTUP----{22 May 2014 . 11:48:49 am} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:49'! wait self primitiveFailed! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 11:50' prior: 33642105! 
stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := StmProcess new fork. p join! ! !StmProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 11:50'! join self primitiveFailed! ! StmProcess removeSelector: #wait! ----QUIT----{22 May 2014 . 11:50:32 am} Squeak4.5-12568.image priorSource: 88165! ----STARTUP----{22 May 2014 . 1:53:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:58'! newStmProcess ^ StmProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33568777! parallelFork ^ (self newStmProcess) fork; yourself! ! BlockClosure removeSelector: #newStmProcess! !BlockClosure methodsFor: 'STM' stamp: 'pre 5/22/2014 13:59' prior: 33643726! parallelFork ^ (self newSTMProcess) fork; yourself! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33556280! primWait SPyVM print: ' Failed to wait for process!! ' self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 33644052! primWait SPyVM print: ' Failed to wait for process!! '. self primitiveFailed.! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'pre 5/22/2014 14:00' prior: 42636506! fork Transcript show: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! STMProcess removeSelector: #initialize! Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:02' prior: 33643008! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new fork. p join! ! Smalltalk removeClassNamed: #StmProcess! 
Process subclass: #STMProcess instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Process subclass: #STMProcess instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 5/22/2014 14:03' prior: 33644952! stmBenchmarkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! ----SNAPSHOT----{22 May 2014 . 2:03:31 pm} Squeak4.5-12568.image priorSource: 88855! ----QUIT----{22 May 2014 . 2:06:55 pm} Squeak4.5-12568.image priorSource: 91148! ----STARTUP----{22 May 2014 . 2:12:45 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 2:13:22 pm} Squeak4.5-12568.image priorSource: 91234! ----STARTUP----{22 May 2014 . 2:41:04 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:41'! paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p join! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:42'! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p join! ! Integer removeSelector: #stmBenchmarkTest! ----QUIT----{22 May 2014 . 2:42:38 pm} Squeak4.5-12568.image priorSource: 91430! ----STARTUP----{22 May 2014 . 2:45:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646317! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646745! stmTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := STMProcess new. p wait.! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:46' prior: 33646127! 
paralellForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----QUIT----{22 May 2014 . 2:46:59 pm} Squeak4.5-12568.image priorSource: 92032! ----STARTUP----{22 May 2014 . 2:47:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48'! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! Integer removeSelector: #paralellForkTest! ----QUIT----{22 May 2014 . 2:48:13 pm} Squeak4.5-12568.image priorSource: 92811! ----STARTUP----{22 May 2014 . 3:30:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:31:15 pm} Squeak4.5-12568.image priorSource: 93241! ----STARTUP----{22 May 2014 . 3:31:29 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! Object subclass: #GameOfLifeField instanceVariableNames: 'data height width' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! Object subclass: #GameOfLifeField instanceVariableNames: 'data height width' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! !GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:23' prior: 33618517! cellAliveAt: x at: y + + ^ (self at: x at: y) = 1! ! !GameOfLifeField methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:14' prior: 33618649! rows: numberOfRows columns: numberOfColumns + + self height: numberOfRows. + self width: numberOfColumns. + self data: (Matrix rows: numberOfRows columns: numberOfColumns element: 0). + ! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25' prior: 33618905! at: x at:y + + ((x < 1) or: [x > self width]) ifTrue: [ ^ 0 ]. + ((y < 1) or: [y > self height]) ifTrue: [ ^ 0 ]. + + ^ self data at: y at: x! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:25' prior: 33619122! 
at: x at:y put: aValue + + self data at: y at: x put: aValue.! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:02' prior: 33619256! atRow: rowNumber put: aRow + + self data atRow: rowNumber put: aRow! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33619396! data + + ^ data! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33619484! data: anObject + + data := anObject! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33619592! height + + ^ height! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33619684! height: anObject + + height := anObject! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:52' prior: 33635345! print + + | resultString | + resultString := ''. + (1 to: self height) do: [:y | + (1 to: self width) do: [ :x | + resultString := resultString , (self data at: y at: x).]. + resultString := resultString , Character cr ]. + ^ resultString ! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33620108! rowSlice: sliceSize collect: aBlock + + ! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:28' prior: 33620221! rowwiseFrom: startRow to: endRow collect: aBlock + + | newField | + newField := GameOfLifeFieldSlice from: startRow to: endRow width: self width. + (startRow to: endRow) do: [ :y | + (1 to: self width) do: [ :x | newField at: x at: y put: (aBlock value: self value: x value: y) ] ]. + ^ newField! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33620587! width + + ^ width! ! !GameOfLifeField methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:45' prior: 33620677! width: anObject + + width := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! GameOfLifeField class instanceVariableNames: ''! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:44' prior: 33640462! 
gliderFieldRows: numberOfRows columns: numberOfColumns + + | newField | + newField := self new rows: numberOfRows columns: numberOfColumns. + + newField + at: 8 at: 5 put: 1; + at: 9 at: 5 put: 1; + at: 10 at: 5 put: 1; + at: 10 at: 4 put: 1; + at: 9 at: 3 put: 1. + + ^ newField! ! !GameOfLifeField class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:43' prior: 33621282! rows: numberOfRows columns: numberOfColumns + + ^ self new rows: numberOfRows columns: numberOfColumns! ! GameOfLifeField subclass: #GameOfLifeFieldSlice instanceVariableNames: 'startRow endRow' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! GameOfLifeField subclass: #GameOfLifeFieldSlice instanceVariableNames: 'startRow endRow' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! !GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:22' prior: 33621802! from: startRow to: endRow width: width + + self startRow: startRow; + endRow: endRow; + width: width; + height: (endRow - startRow + 1); + data: (Matrix rows: (endRow - startRow + 1) columns: width). + + ^ self! ! !GameOfLifeFieldSlice methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 21:31' prior: 33622100! rowwiseDo: aBlock + + self startRow to: self endRow do: [ :rowNumber | + aBlock value: rowNumber value: (self data atRow: (rowNumber - self startRow) + 1). + ].! ! !GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:30' prior: 33622337! at: x at:y put: aValue + + self data at: y + 1 - self startRow at: x put: aValue.! ! !GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11' prior: 33622497! endRow + + ^ endRow! ! !GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11' prior: 33622594! endRow: anObject + + endRow := anObject! ! !GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11' prior: 33622711! startRow + + ^ startRow! ! 
!GameOfLifeFieldSlice methodsFor: 'accessing' stamp: 'pre 1/19/2014 21:11' prior: 33622812! startRow: anObject + + startRow := anObject! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! GameOfLifeFieldSlice class instanceVariableNames: ''! !GameOfLifeFieldSlice class methodsFor: 'as yet unclassified' stamp: 'pre 1/19/2014 20:53' prior: 33623064! from: startRow to: endRow width: width + + ^ self new from: startRow to: endRow width: width! ! Object subclass: #STMSimulation instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! Object subclass: #STMSimulation instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33623613! initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:50' prior: 33623859! simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]] parallelFork. ]). +! ! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33634806! simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! 
!STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33624653! startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. + + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33625092! thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). + + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33625748! field + + ^ field! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33625836! field: anObject + + field := anObject! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33625944! fieldNew + + ^ fieldNew! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626038! fieldNew: anObject + + fieldNew := anObject! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626152! fieldSlices + + ^ fieldSlices! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626252! fieldSlices: anObject + + fieldSlices := anObject! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626372! numberOfProcesses + + ^ numberOfProcesses! ! 
!STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626484! numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626616! processes + + ^ processes! ! !STMSimulation methodsFor: 'accessing' stamp: 'pre 1/21/2014 09:44' prior: 33626712! processes: anObject + + processes := anObject! ! !STMSimulation methodsFor: 'initialize-release' stamp: 'pre 1/21/2014 09:44' prior: 33626837! initialize + + self processes: OrderedCollection new. + ! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! STMSimulation class instanceVariableNames: ''! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33637232! benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33627275! benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33627465! standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! !STMSimulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/21/2014 09:44' prior: 33637437! standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! Object subclass: #Simulation instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! 
Object subclass: #Simulation instanceVariableNames: 'processes field numberOfProcesses fieldSlices fieldNew' classVariableNames: '' poolDictionaries: '' category: 'VM-GameOfLife'! !Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:37' prior: 33628383! initialField: aGameOfLifeField + + self field: aGameOfLifeField. + self fieldNew: (GameOfLifeField rows: (aGameOfLifeField height) columns: (aGameOfLifeField width)).! ! !Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:44' prior: 33628626! simulateRound: roundNumber + + self processes: ((1 to: self numberOfProcesses) collect: [ :processNumber | + | semaphore | + semaphore := Semaphore new. + [| rows | + rows := self startProcess: processNumber. + rows rowwiseDo: [ :rowNumber :aRow | self fieldNew atRow: rowNumber put: aRow ]. + semaphore signal] fork. + semaphore ]). +! ! !Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:00' prior: 33629039! simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + "Transcript show: self fieldNew print. + Transcript show: Character cr." + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! !Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:34' prior: 33629484! startProcess: processNumber + + | endOfSlice slice startOfSlice | + slice := (self field height / self numberOfProcesses). + startOfSlice := ((processNumber - 1) * slice) + 1. + endOfSlice := processNumber * slice. + + ^ self field rowwiseFrom: startOfSlice + to: endOfSlice + collect: [ :tempField :x :y | self thumbUpOrDownAt: x at: y on: tempField ] + + + ! ! !Simulation methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 10:48' prior: 33629920! 
thumbUpOrDownAt: x at: y on: tempField + | liveCellCount | + + liveCellCount := (tempField at: x - 1 at: y - 1) + + (tempField at: x + 0 at: y - 1) + + (tempField at: x + 1 at: y - 1) + + (tempField at: x - 1 at: y + 0) + + (tempField at: x + 1 at: y + 0) + + (tempField at: x - 1 at: y + 1) + + (tempField at: x + 0 at: y + 1) + + (tempField at: x + 1 at: y + 1). + + (tempField cellAliveAt: x at: y) + ifTrue: [((2 = liveCellCount) + or: [liveCellCount = 3]) + ifTrue: [^ 1] + ifFalse: [^ 0]] + ifFalse: [(liveCellCount = 3) + ifTrue: [^ 1] + ifFalse: [^ 0]]! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04' prior: 33630573! field + + ^ field! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04' prior: 33630658! field: anObject + + field := anObject! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06' prior: 33630763! fieldNew + + ^ fieldNew! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 22:06' prior: 33630854! fieldNew: anObject + + fieldNew := anObject! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30' prior: 33630965! fieldSlices + + ^ fieldSlices! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:30' prior: 33631062! fieldSlices: anObject + + fieldSlices := anObject! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09' prior: 33631179! numberOfProcesses + + ^ numberOfProcesses! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:09' prior: 33631288! numberOfProcesses: aNumber + + numberOfProcesses := aNumber + ! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04' prior: 33631417! processes + + ^ processes! ! !Simulation methodsFor: 'accessing' stamp: 'pre 1/19/2014 20:04' prior: 33631510! processes: anObject + + processes := anObject! ! !Simulation methodsFor: 'initialize-release' stamp: 'pre 1/19/2014 20:04' prior: 33631632! initialize + + self processes: OrderedCollection new. + ! ! 
"-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! Simulation class instanceVariableNames: ''! !Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:08' prior: 33637027! benchmark + + ^ (1 to: 4) collect: [ :i | + [ self standardSimulation: (2 raisedTo: i) ] timeToRun ]! ! !Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 14:09' prior: 33632061! benchmark2 + + ^ (1 to: 5) collect: [ :i | + [ self standardSimulation2: (2 raisedTo: i) ] timeToRun ]! ! !Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59' prior: 33632248! standardSimulation2: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! !Simulation class methodsFor: 'as yet unclassified' stamp: 'pre 1/20/2014 13:59' prior: 33632524! standardSimulation: numberOfProcesses + + ^ self new + numberOfProcesses: numberOfProcesses; + initialField: (GameOfLifeField gliderFieldRows: 32 columns: 32); + simulateRounds: 5. + + ! ! ----End fileIn of /home/hub/Downloads/VM-GameOfLife.st----! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'hh 5/22/2014 15:32' prior: 33654573! simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + SPyVM print self fieldNew print. + Transcript show: Character cr. + + swapField := self field. + self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! !STMSimulation methodsFor: 'as yet unclassified' stamp: 'hh 5/22/2014 15:33' prior: 33663832! simulateRounds: numberOfRounds + + | swapField | + + 1 to: numberOfRounds do: [ :roundNumber | + self simulateRound: roundNumber. + self processes do: [ :semaphore | semaphore wait. ]. + + SPyVM print: (self fieldNew print). + SPyVM print: (Character cr). + + swapField := self field. 
+ self field: self fieldNew. + self fieldNew: swapField. + ]. + + ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! 
\ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 9a8fd93e37a9bd12ba8b11507164f7afb300e822..f3034333df2966ec7364ce561fe2683056b6c8e1 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu May 22 16:13:55 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 22 May 2014 16:13:55 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: implemented STM atomic primitives Message-ID: <20140522141355.DFCFC1C02F3@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: stmgc-c7 Changeset: r839:981f12dee8ca Date: 2014-05-22 16:10 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/981f12dee8ca/ Log: implemented STM atomic primitives diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1489,6 +1489,22 @@ print "STM Rendezvous" print "Should break: %s" % rstm.should_break_transaction() + at expose_primitive(STM_ATOMIC_ENTER, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_ENTER primitive called" + + rstm.increment_atomic() + + at expose_primitive(STM_ATOMIC_LEAVE, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_LEAVE primitive called" + + rstm.decrement_atomic() + # ___________________________________________________________________________ # BlockClosure Primitives From noreply at buildbot.pypy.org Thu May 22 16:13:56 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 22 May 2014 16:13:56 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Automated merge with bundle:/var/folders/5q/nnfvtm9x521dpj2x2qkcnlf80000gn/T/SourceTreeTemp.N5FsUS Message-ID: <20140522141356.DED101C02F3@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: stmgc-c7 Changeset: r840:3a7a4670ff52 Date: 2014-05-22 16:11 +0200 
http://bitbucket.org/pypy/lang-smalltalk/changeset/3a7a4670ff52/ Log: Automated merge with bundle:/var/folders/5q/nnfvtm9x521dpj2x2qkcnlf8 0000gn/T/SourceTreeTemp.N5FsUS diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1489,6 +1489,22 @@ print "STM Rendezvous" print "Should break: %s" % rstm.should_break_transaction() + at expose_primitive(STM_ATOMIC_ENTER, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_ENTER primitive called" + + rstm.increment_atomic() + + at expose_primitive(STM_ATOMIC_LEAVE, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rstm + + print "STM_ATOMIC_LEAVE primitive called" + + rstm.decrement_atomic() + # ___________________________________________________________________________ # BlockClosure Primitives From noreply at buildbot.pypy.org Thu May 22 16:58:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:53 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: optimize a bit the card-wise synchronize objs Message-ID: <20140522145853.239C31C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1231:8fac7b4ff774 Date: 2014-05-22 13:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/8fac7b4ff774/ Log: optimize a bit the card-wise synchronize objs diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -516,7 +516,7 @@ long i, myself = STM_SEGMENT->segment_num; /* simple heuristic to check if probably the whole object is - marked anyway so we can do page-wise synchronize */ + marked anyway so we should do page-wise synchronize */ if (write_locks[first_card_index + 1] == CARD_MARKED_OLD && write_locks[first_card_index + last_card_index] == CARD_MARKED_OLD && write_locks[first_card_index + (last_card_index >> 1) + 1] == CARD_MARKED_OLD) { @@ -530,20 +530,38 @@ 
dprintf(("card_wise_sync syncs %p,size:%lu card-wise\n", obj, obj_size)); + /* Combine multiple marked cards and do a memcpy for them. We don't + try yet to use page_copy() or otherwise take into account privatization + of pages (except _has_private_page_in_range) */ + uintptr_t start = 0; + uintptr_t copy_size = 0; while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; + uintptr_t card_byte_offset = get_card_byte_offset(card_index); + uint8_t card_value = write_locks[card_lock_idx]; - if (write_locks[card_lock_idx] == CARD_MARKED_OLD) { + OPT_ASSERT(card_value != CARD_MARKED); /* always only MARKED_OLD or CLEAR */ + + if (card_value == CARD_MARKED_OLD) { write_locks[card_lock_idx] = CARD_CLEAR; - uintptr_t card_byte_offset = get_card_byte_offset(card_index); - uintptr_t start = (uintptr_t)obj + card_byte_offset; - uintptr_t copy_size = CARD_SIZE; + if (start == 0) { /* first marked card */ + start = (uintptr_t)obj + card_byte_offset; + } - if (start - (uintptr_t)obj + copy_size > obj_size) { + copy_size += CARD_SIZE; + + if ((start - (uintptr_t)obj) + copy_size > obj_size) { /* don't copy over the object's bounds */ copy_size = obj_size - (start - (uintptr_t)obj); } + } + + if (start /* something to copy */ + && (card_value != CARD_MARKED_OLD /* found non-marked card */ + || card_index == last_card_index)) { /* this is the last card */ + /* do the copying: */ + //dprintf(("copy %lu bytes\n", copy_size)); /* since we have marked cards, at least one page here must be private */ assert(_has_private_page_in_range(myself, start, copy_size)); @@ -553,6 +571,7 @@ char *dst = REAL_ADDRESS(stm_object_pages, start); memcpy(dst, src, copy_size); + /* copy to other segments */ for (i = 1; i <= NB_SEGMENTS; i++) { if (i == myself) continue; @@ -562,12 +581,22 @@ dst = REAL_ADDRESS(get_segment_base(i), start); memcpy(dst, src, copy_size); } - } else { - assert(write_locks[card_lock_idx] != CARD_MARKED); /* always only MARKED_OLD */ + + 
copy_size = 0; + start = 0; } card_index++; } + +#ifndef NDEBUG + char *src = REAL_ADDRESS(stm_object_pages, (uintptr_t)obj); + char *dst; + for (i = 1; i <= NB_SEGMENTS; i++) { + dst = REAL_ADDRESS(get_segment_base(i), (uintptr_t)obj); + assert(memcmp(dst, src, obj_size) == 0); + } +#endif } diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -186,3 +186,46 @@ assert not stm_was_written_card(o) assert stm_get_ref(o, 0) == p self.commit_transaction() + + def test_synchronize_objs(self): + o = stm_allocate_old(2000, True) + + self.start_transaction() + stm_set_char(o, 'a', 1000, False) + self.commit_transaction() + + self.switch(1) + + self.start_transaction() + stm_set_char(o, 'b', 1001, False) + assert stm_get_char(o, 1000) == 'a' + self.commit_transaction() + + self.switch(0) + + self.start_transaction() + assert stm_get_char(o, 1001) == 'b' + + stm_set_char(o, 'c', 1000, True) + stm_set_char(o, 'c', 1000+CARD_SIZE, True) + stm_set_char(o, 'c', 1000+CARD_SIZE*2, True) + stm_set_char(o, 'c', 1000+CARD_SIZE*3, True) + + stm_set_char(o, 'd', 1000+CARD_SIZE*10, True) + + stm_set_char(o, 'e', 1000+CARD_SIZE*12, True) + self.commit_transaction() + + self.switch(1) + + self.start_transaction() + assert stm_get_char(o, 1000) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE*2) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE*3) == 'c' + + assert stm_get_char(o, 1000+CARD_SIZE*10) == 'd' + + assert stm_get_char(o, 1000+CARD_SIZE*12) == 'e' + + self.commit_transaction() From noreply at buildbot.pypy.org Thu May 22 16:58:54 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:54 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: change of API; like in pypy CARDS are done internally Message-ID: <20140522145854.535DA1C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: 
r1232:10b45dc9310d Date: 2014-05-22 16:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/10b45dc9310d/ Log: change of API; like in pypy CARDS are done internally diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -194,7 +194,7 @@ /* tell the other to commit ASAP, since it causes aborts */ signal_other_to_commit_soon(contmgr.other_pseg); - dprintf(("abort in contention\n")); + dprintf(("abort in contention: kind %d\n", kind)); STM_SEGMENT->nursery_end = abort_category; marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -83,7 +83,6 @@ { /* is this an object from the same transaction, outside the nursery? */ if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { - assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); @@ -106,17 +105,23 @@ return false; } -void _stm_write_slowpath(object_t *obj, uintptr_t card_index) +void _stm_write_slowpath(object_t *obj) { - assert(IMPLY(!(obj->stm_flags & GCFLAG_HAS_CARDS), card_index == 0)); - assert( - IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( - (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, - obj)))); + _stm_write_slowpath_card(obj, 0); +} + +void _stm_write_slowpath_card(object_t *obj, uintptr_t card_index) +{ assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + if (!(obj->stm_flags & GCFLAG_HAS_CARDS)) + card_index = 0; /* assume no cards */ + + assert(IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( + (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)))); + if (_stm_write_slowpath_overflow_objs(obj, card_index)) return; @@ -353,6 +358,8 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! 
*/ + dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n", + item, STM_SEGMENT->segment_num, i)); if (write_read_contention_management(i, item)) { /* If we reach this point, we didn't abort, but we had to wait for the other thread to commit. If we diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -119,6 +119,8 @@ object_t *o = (object_t *)(p - stm_object_pages); o->stm_flags = GCFLAG_WRITE_BARRIER; + if (size_rounded_up > CARD_SIZE) + o->stm_flags |= GCFLAG_HAS_CARDS; if (testing_prebuilt_objs == NULL) testing_prebuilt_objs = list_create(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -114,6 +114,8 @@ copy_large_object:; char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); + if (size > CARD_SIZE) + nobj->stm_flags |= GCFLAG_HAS_CARDS; nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; } @@ -149,6 +151,7 @@ /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); + _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), nobj); } static void collect_roots_in_nursery(void) @@ -332,19 +335,20 @@ if (obj->stm_flags & GCFLAG_HAS_CARDS) { /* all objects that had WB cleared need to be fully synchronised on commit, so we have to mark all their cards */ + struct stm_priv_segment_info_s *pseg = get_priv_segment( + STM_SEGMENT->segment_num); + if (was_definitely_young) { - /* we don't mark cards on young objects */ + /* stm_wb-slowpath should never have triggered for young objs */ assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); return; } if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { /* we do not need the old cards for overflow objects */ - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, CARD_CLEAR, false); + _reset_object_cards(pseg, obj, CARD_CLEAR, false); } else { - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, 
CARD_MARKED_OLD, true); /* mark all */ + _reset_object_cards(pseg, obj, CARD_MARKED_OLD, true); /* mark all */ } } } if (obj->stm_flags & GCFLAG_CARDS_SET) { @@ -362,6 +366,7 @@ while (!list_is_empty(lst)) { object_t *obj = (object_t*)list_pop_item(lst); + assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_CARDS_SET); _collect_now(obj, false); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); @@ -640,6 +645,10 @@ char *result = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)(result - stm_object_pages); + + if (size_rounded_up > CARD_SIZE) + o->stm_flags |= GCFLAG_HAS_CARDS; + tree_insert(STM_PSEGMENT->young_outside_nursery, (uintptr_t)o, 0); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); @@ -741,6 +750,9 @@ memcpy(realnobj, realobj, size); obj->stm_flags |= GCFLAG_HAS_SHADOW; + if (size > CARD_SIZE) /* probably not necessary */ + nobj->stm_flags |= GCFLAG_HAS_CARDS; + tree_insert(STM_PSEGMENT->nursery_objects_shadows, (uintptr_t)obj, (uintptr_t)nobj); return nobj; diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -6,6 +6,7 @@ static uint32_t highest_overflow_number; +static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj); static void _reset_object_cards(struct stm_priv_segment_info_s *pseg, object_t *obj, uint8_t mark_value, bool mark_all); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -106,7 +106,8 @@ /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ -void _stm_write_slowpath(object_t *, uintptr_t); +void _stm_write_slowpath(object_t *); +void _stm_write_slowpath_card(object_t *, uintptr_t); object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); @@ -216,7 +217,7 @@ static inline void stm_write(object_t *obj) { if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) - 
_stm_write_slowpath(obj, 0); + _stm_write_slowpath(obj); } /* The following is a GC-optimized barrier that works on the granularity @@ -228,9 +229,8 @@ __attribute__((always_inline)) static inline void stm_write_card(object_t *obj, uintptr_t index) { - OPT_ASSERT(obj->stm_flags & _STM_GCFLAG_HAS_CARDS); if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) - _stm_write_slowpath(obj, index); + _stm_write_slowpath_card(obj, index); } /* Must be provided by the user of this library. @@ -268,17 +268,6 @@ return (object_t *)p; } -/* directly after allocation one can enable card marking for any - kind of object with stm_use_cards(obj). This enables the use - of stm_write/read_card() barriers that do more fine-grained - conflict detection and garbage collection. - These objects need to be at least 32bytes in size! -*/ -__attribute__((always_inline)) -static inline void stm_use_cards(object_t* o) -{ - o->stm_flags |= _STM_GCFLAG_HAS_CARDS; -} /* Allocate a weakref object. Weakref objects have a reference to an object at the byte-offset diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -43,7 +43,6 @@ object_t *stm_allocate_weakref(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); -void stm_use_cards(object_t* o); /*void stm_write_card(); use _checked_stm_write_card() instead */ @@ -352,7 +351,7 @@ GCFLAG_HAS_CARDS = lib._STM_GCFLAG_HAS_CARDS CARD_SIZE = lib._STM_CARD_SIZE # 16b at least NB_SEGMENTS = lib.STM_NB_SEGMENTS - +FAST_ALLOC = lib._STM_FAST_ALLOC class Conflict(Exception): pass @@ -365,26 +364,20 @@ def is_in_nursery(o): return lib.stm_can_move(o) -def stm_allocate_old(size, use_cards=False): +def stm_allocate_old(size): o = lib._stm_allocate_old(size) - if use_cards: - lib.stm_use_cards(o) tid = 42 + size lib._set_type_id(o, tid) return o -def stm_allocate_old_refs(n, use_cards=False): +def stm_allocate_old_refs(n): o = lib._stm_allocate_old(HDR + n * WORD) - if 
use_cards: - lib.stm_use_cards(o) tid = 421420 + n lib._set_type_id(o, tid) return o -def stm_allocate(size, use_cards=False): +def stm_allocate(size): o = lib.stm_allocate(size) - if use_cards: - lib.stm_use_cards(o) tid = 42 + size lib._set_type_id(o, tid) return o @@ -401,10 +394,8 @@ def stm_get_weakref(o): return lib._get_weakref(o) -def stm_allocate_refs(n, use_cards=False): +def stm_allocate_refs(n): o = lib.stm_allocate(HDR + n * WORD) - if use_cards: - lib.stm_use_cards(o) tid = 421420 + n lib._set_type_id(o, tid) return o @@ -446,7 +437,6 @@ raise Conflict() def stm_write_card(o, index): - assert stm_get_flags(o) & GCFLAG_HAS_CARDS if lib._checked_stm_write_card(o, index): raise Conflict() diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -17,14 +17,14 @@ self.switch(0) def test_simple(self): - o = stm_allocate_old(1024, True) + o = stm_allocate_old(1024) self.start_transaction() stm_read(o) stm_write(o) self.commit_transaction() def test_simple2(self): - o = stm_allocate_old(1024, True) + o = stm_allocate_old(1024) self.start_transaction() stm_write_card(o, 5) assert not stm_was_written(o) # don't remove GCFLAG_WRITE_BARRIER @@ -34,7 +34,7 @@ @py.test.mark.parametrize("k", range(3)) def test_overflow(self, k): self.start_transaction() - o = stm_allocate(1024, True) + o = stm_allocate(1024) self.push_root(o) self._collect(k) @@ -52,10 +52,10 @@ self.commit_transaction() def test_nursery(self): - o = stm_allocate_old_refs(200, True) + o = stm_allocate_old_refs(200) self.start_transaction() - p = stm_allocate(64, True) - d = stm_allocate(64, True) + p = stm_allocate(64) + d = stm_allocate(64) stm_set_ref(o, 199, p, True) # without a write-barrier: @@ -80,7 +80,7 @@ # card cleared after last collection, # so no retrace of index 199: - d2 = stm_allocate(64, True) + d2 = stm_allocate(64) # without a write-barrier: lib._set_ptr(o, 199, d2) self.push_root(o) @@ -92,7 
+92,7 @@ assert dn == d2 def test_nursery2(self): - o = stm_allocate_old_refs(200, True) + o = stm_allocate_old_refs(200) self.start_transaction() p = stm_allocate(64) d = stm_allocate(64) @@ -111,7 +111,7 @@ assert not is_in_nursery(stm_get_ref(o, 100)) def test_nursery3(self): - o = stm_allocate_old_refs(200, True) + o = stm_allocate_old_refs(200) self.start_transaction() stm_minor_collect() @@ -134,7 +134,7 @@ assert stm_get_ref(o, 100) == e # not traced def test_abort_cleanup(self): - o = stm_allocate_old_refs(200, True) + o = stm_allocate_old_refs(200) self.start_transaction() stm_minor_collect() @@ -166,7 +166,7 @@ @py.test.mark.parametrize("k", range(3)) def test_major_gc(self, k): - o = stm_allocate_old_refs(200, True) + o = stm_allocate_old_refs(200) self.start_transaction() p = stm_allocate(64) stm_set_ref(o, 0, p, True) @@ -188,7 +188,7 @@ self.commit_transaction() def test_synchronize_objs(self): - o = stm_allocate_old(2000, True) + o = stm_allocate_old(2000) self.start_transaction() stm_set_char(o, 'a', 1000, False) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -36,17 +36,22 @@ # we win but cannot wait in tests... 
raise WriteWriteConflictNotTestable - if our_trs.inevitable: + if our_trs.start_time >= other_trs.start_time: + abort_other = False + else: + abort_other = True + + if other_trs.check_must_abort(): + abort_other = True + elif our_trs.inevitable: + abort_other = True + elif other_trs.inevitable: + abort_other = False + + if not abort_other: + our_trs.set_must_abort(objs_in_conflict) + else: other_trs.set_must_abort(objs_in_conflict) - elif other_trs.start_time < our_trs.start_time: - pass - elif not other_trs.inevitable: - other_trs.set_must_abort(objs_in_conflict) - - if not other_trs.check_must_abort(): - our_trs.set_must_abort(objs_in_conflict) - elif wait: - assert not our_trs.inevitable class TransactionState(object): @@ -227,19 +232,14 @@ self.root_numbering = 0 self.ref_type_map = {} self.root_sizes = {} - self.with_cards = {} - def get_new_root_name(self, is_ref_type, size, with_cards): + def get_new_root_name(self, is_ref_type, size): self.root_numbering += 1 r = "lp_%s_%d" % ("ref" if is_ref_type else "char", self.root_numbering) self.ref_type_map[r] = is_ref_type self.root_sizes[r] = size - self.with_cards[r] = with_cards return r - def has_cards(self, r): - return self.with_cards[r] - def has_ref_type(self, r): return self.ref_type_map[r] @@ -368,11 +368,10 @@ #"SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) - with_cards = int(size) >= 32 and global_state.rnd.randrange(1, 100) > 10 - r = global_state.get_new_root_name(False, size, with_cards) + r = global_state.get_new_root_name(False, size) thread_state.push_roots(ex) - ex.do('%s = stm_allocate(%s, %s)' % (r, size, bool(with_cards))) + ex.do('%s = stm_allocate(%s)' % (r, size)) ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, 0, True) @@ -382,10 +381,9 @@ def op_allocate_ref(ex, global_state, thread_state): num = str(global_state.rnd.randrange(1, 100)) - with_cards = int(num) >= 4 and global_state.rnd.randrange(1, 100) > 10 - r = 
global_state.get_new_root_name(True, num, with_cards) + r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) - ex.do('%s = stm_allocate_refs(%s, %s)' % (r, num, bool(with_cards))) + ex.do('%s = stm_allocate_refs(%s)' % (r, num)) ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])))) thread_state.transaction_state.add_root(r, "ffi.NULL", True) @@ -417,7 +415,7 @@ r = thread_state.get_random_root() trs = thread_state.transaction_state is_ref = global_state.has_ref_type(r) - has_cards = global_state.has_cards(r) and global_state.rnd.randrange(1, 100) > 5 + try_cards = global_state.rnd.randrange(1, 100) > 5 # # check for possible write-write conflict: was_written = False @@ -446,13 +444,13 @@ thread_state.abort_transaction() offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, has_cards)) + ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, try_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v, has_cards)) + ex.do(raising_call(False, "stm_set_ref", r, "0", v, try_cards)) else: - ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset, has_cards)) + ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset, try_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", has_cards)) + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", try_cards)) def op_read(ex, global_state, thread_state): r = thread_state.get_random_root() @@ -564,13 +562,13 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = global_state.get_new_root_name(False, "384", True) - ex.do('%s = stm_allocate_old(384, True)' % r) + r = global_state.get_new_root_name(False, "384") + ex.do('%s = stm_allocate_old(384)' % r) global_state.committed_transaction_state.add_root(r, 0, False) global_state.prebuilt_roots.append(r) - r = global_state.get_new_root_name(True, "50", True) - ex.do('%s = 
stm_allocate_old_refs(50, True)' % r) + r = global_state.get_new_root_name(True, "50") + ex.do('%s = stm_allocate_old_refs(50)' % r) global_state.committed_transaction_state.add_root(r, "ffi.NULL", False) global_state.prebuilt_roots.append(r) global_state.committed_transaction_state.write_set = set() @@ -624,5 +622,6 @@ return test_fun for _seed in range(5000, 5100): + _seed = 5004 _fn = _make_fun(_seed) locals()[_fn.__name__] = _fn From noreply at buildbot.pypy.org Thu May 22 16:58:55 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:55 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix small bug causing test_random to fail sometimes Message-ID: <20140522145855.6AAC51C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1233:e69d7d6c2ed9 Date: 2014-05-22 16:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/e69d7d6c2ed9/ Log: fix small bug causing test_random to fail sometimes diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -280,6 +280,9 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); + /* mark slot as unread */ + ((stm_read_marker_t *)(item->addr >> 4))->rm = 0; + _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -324,7 +324,7 @@ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER NB_SEGMENTS = lib.STM_NB_SEGMENTS - +FAST_ALLOC = lib._STM_FAST_ALLOC class Conflict(Exception): pass diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -236,3 +236,14 @@ # the 'p1' reference is invalid now, don't try to read it. # we check that it's invalid because _stm_total_allocated() # only records one of the two objects. 
+ + def test_clear_read_marker_for_external_young(self): + self.start_transaction() + big = stm_allocate(FAST_ALLOC + 1000) # young outside nursery + stm_read(big) + assert stm_was_read(big) + stm_minor_collect() # free young outside + assert not stm_was_read(big) + # if the read marker is not cleared, we get false conflicts + # with later transactions using the same large-malloced slot + # as our outside-nursery-obj From noreply at buildbot.pypy.org Thu May 22 16:58:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:56 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Merge default Message-ID: <20140522145856.6EC941C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1234:b6d49351889d Date: 2014-05-22 16:35 +0200 http://bitbucket.org/pypy/stmgc/changeset/b6d49351889d/ Log: Merge default diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -476,6 +476,8 @@ _reset_object_cards(pseg, obj, CARD_CLEAR, false); _cards_cleared_in_object(pseg, obj); + /* mark slot as unread */ + ((stm_read_marker_t *)(item->addr >> 4))->rm = 0; _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -236,3 +236,14 @@ # the 'p1' reference is invalid now, don't try to read it. # we check that it's invalid because _stm_total_allocated() # only records one of the two objects. 
+ + def test_clear_read_marker_for_external_young(self): + self.start_transaction() + big = stm_allocate(FAST_ALLOC + 1000) # young outside nursery + stm_read(big) + assert stm_was_read(big) + stm_minor_collect() # free young outside + assert not stm_was_read(big) + # if the read marker is not cleared, we get false conflicts + # with later transactions using the same large-malloced slot + # as our outside-nursery-obj From noreply at buildbot.pypy.org Thu May 22 16:58:57 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: complete fix for last commit Message-ID: <20140522145857.6BEEE1C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1235:6c47b2117314 Date: 2014-05-22 16:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/6c47b2117314/ Log: complete fix for last commit diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -482,7 +482,22 @@ static inline bool largemalloc_keep_object_at(char *data) { /* this is called by _stm_largemalloc_sweep() */ - return mark_visited_test_and_clear((object_t *)(data - stm_object_pages)); + object_t *obj = (object_t *)(data - stm_object_pages); + if (!mark_visited_test_and_clear(obj)) { +#ifndef NDEBUG + /* This is actually needed in order to avoid random write-read + conflicts with objects read and freed long in the past. Still, + it is probably rare enough so that we don't need this additional + overhead. 
(test_random hits it sometimes) */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + ((struct stm_read_marker_s *) + (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; + } +#endif + return false; + } + return true; } static void sweep_large_objects(void) From noreply at buildbot.pypy.org Thu May 22 16:58:58 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:58 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Merge with default Message-ID: <20140522145858.6AACD1C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1236:dbb6cb34649b Date: 2014-05-22 16:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/dbb6cb34649b/ Log: Merge with default diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -527,7 +527,22 @@ static inline bool largemalloc_keep_object_at(char *data) { /* this is called by _stm_largemalloc_sweep() */ - return mark_visited_test_and_clear((object_t *)(data - stm_object_pages)); + object_t *obj = (object_t *)(data - stm_object_pages); + if (!mark_visited_test_and_clear(obj)) { +#ifndef NDEBUG + /* This is actually needed in order to avoid random write-read + conflicts with objects read and freed long in the past. Still, + it is probably rare enough so that we don't need this additional + overhead. 
(test_random hits it sometimes) */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + ((struct stm_read_marker_s *) + (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; + } +#endif + return false; + } + return true; } static void sweep_large_objects(void) From noreply at buildbot.pypy.org Thu May 22 16:58:59 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 16:58:59 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: re-enable random tests Message-ID: <20140522145859.6BD6D1C35CC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1237:edcbef5b371c Date: 2014-05-22 16:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/edcbef5b371c/ Log: re-enable random tests diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -622,6 +622,5 @@ return test_fun for _seed in range(5000, 5100): - _seed = 5004 _fn = _make_fun(_seed) locals()[_fn.__name__] = _fn From noreply at buildbot.pypy.org Thu May 22 17:29:10 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 17:29:10 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: make CARD_SIZE=128 Message-ID: <20140522152910.488801D2CE1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1238:05003fd59ebb Date: 2014-05-22 17:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/05003fd59ebb/ Log: make CARD_SIZE=128 diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -237,13 +237,11 @@ == pseg->overflow_number) static inline uintptr_t get_card_index(uintptr_t byte_offset) { - assert(CARD_SIZE == 32); - return (byte_offset >> 5) + 1; + return (byte_offset / CARD_SIZE) + 1; } static inline uintptr_t get_card_byte_offset(uintptr_t card_index) { - assert(CARD_SIZE == 32); - return (card_index - 1) << 5; + return (card_index - 1) * CARD_SIZE; } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c 
@@ -263,7 +263,7 @@ dprintf(("mark cards of %p, size %lu with %d, all: %d\n", obj, size, mark_value, mark_all)); - + dprintf(("obj has %lu cards\n", last_card_index)); while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -83,8 +83,7 @@ { /* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); - assert(CARD_SIZE > 0 && CARD_SIZE % 16 == 0); - assert(CARD_SIZE == 32); /* actually, it is hardcoded in some places right now.. */ + assert(CARD_SIZE >= 32 && CARD_SIZE % 16 == 0); assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -149,7 +149,7 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_GCFLAG_HAS_CARDS 0x08 #define _STM_GCFLAG_CARDS_SET 0x10 -#define _STM_CARD_SIZE 32 /* >= 32 */ +#define _STM_CARD_SIZE 128 /* >= 32 */ #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -188,7 +188,7 @@ self.commit_transaction() def test_synchronize_objs(self): - o = stm_allocate_old(2000) + o = stm_allocate_old(1000+20*CARD_SIZE) self.start_transaction() stm_set_char(o, 'a', 1000, False) From noreply at buildbot.pypy.org Thu May 22 17:38:41 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 17:38:41 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: this is already a huge (5x) slowdown in tests compared to the previous solution Message-ID: <20140522153841.C8CDA1D2D2C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1239:7e6f29978ec6 Date: 2014-05-22 17:39 +0200 
http://bitbucket.org/pypy/stmgc/changeset/7e6f29978ec6/ Log: this is already a huge (5x) slowdown in tests compared to the previous solution diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -555,6 +555,9 @@ #ifndef NDEBUG size_t i; uint8_t *s = write_locks; +# ifndef STM_TESTS + if (n > 5000) n = 5000; +# endif for (i = 0; i < n; i++) assert(s[i] == CARD_CLEAR || s[i] == CARD_MARKED || s[i] == CARD_MARKED_OLD); From noreply at buildbot.pypy.org Thu May 22 17:56:21 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 17:56:21 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: fix and remove unnecessary reset Message-ID: <20140522155621.31E711D2D2D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1240:5b981332f5c7 Date: 2014-05-22 17:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/5b981332f5c7/ Log: fix and remove unnecessary reset diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -468,16 +468,11 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { object_t *obj = (object_t*)item->addr; - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item->addr); - - assert(!_is_in_nursery(obj)); - if (realobj->stm_flags & GCFLAG_HAS_CARDS) - _reset_object_cards(pseg, obj, CARD_CLEAR, false); - _cards_cleared_in_object(pseg, obj); /* mark slot as unread */ - ((stm_read_marker_t *)(item->addr >> 4))->rm = 0; + ((struct stm_read_marker_s *) + (pseg->pub.segment_base + (((uintptr_t)obj) >> 4)))->rm = 0; + _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Thu May 22 17:56:22 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 22 May 2014 17:56:22 +0200 (CEST) Subject: [pypy-commit] stmgc default: completer fix... 
(I should stop now) Message-ID: <20140522155622.3C8151D2D2D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1241:70c403598485 Date: 2014-05-22 17:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/70c403598485/ Log: completer fix... (I should stop now) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -281,7 +281,8 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); /* mark slot as unread */ - ((stm_read_marker_t *)(item->addr >> 4))->rm = 0; + ((struct stm_read_marker_s *) + (pseg->pub.segment_base + (item->addr >> 4)))->rm = 0; _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; From noreply at buildbot.pypy.org Thu May 22 20:29:53 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 May 2014 20:29:53 +0200 (CEST) Subject: [pypy-commit] pypy packaging: merge heads, prefering e058ca3ed85b Message-ID: <20140522182953.D1F541C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71673:57b59fed1716 Date: 2014-05-22 19:21 +0300 http://bitbucket.org/pypy/pypy/changeset/57b59fed1716/ Log: merge heads, prefering e058ca3ed85b From noreply at buildbot.pypy.org Thu May 22 20:29:55 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 May 2014 20:29:55 +0200 (CEST) Subject: [pypy-commit] pypy packaging: append bits to LICENSE file Message-ID: <20140522182955.139331C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71674:d998fb2217c4 Date: 2014-05-22 20:44 +0300 http://bitbucket.org/pypy/pypy/changeset/d998fb2217c4/ Log: append bits to LICENSE file diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -55,27 +55,50 @@ # so no extra license needed? 
with open(base_file) as fid: txt = fid.read() + searches = [("bzip2","libbz2-*", "copyright"), + ("openssl", "openssl*", "copyright"), + ] + if not options.no_tk: + searches += [("tk", "tk-dev", "copyright"), + ("tcl", "tcl-dev", "copyright")] + for name, pat, file in searches: + txt += "="*40 + txt += "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" % name + dirs = glob.glob(options.license_base + "/" +pat) + if not dirs: + raise ValueError, "Could not find "+ options.license_base + "/" + pat + if len(dirs) > 2: + raise ValueError, "Multiple copies of "+pat + dir = dirs[0] + with open(os.path.join(dir, file)) as fid: + # Read up to the ---- dividing the packaging header from the actual + # copyright (bzip) or 'LICENSE ISSUES' for openssl + for line in fid: + if (line.startswith('---------') or 'LICENSE ISSUES' in line): + break + txt += line + for line in fid: + txt += line return txt def generate_license_windows(base_file, options): - # Do as cpython does with open(base_file) as fid: txt = fid.read() - shutil.copyfileobj(open("crtlicense.txt"), out) - for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), - ("openssl", "openssl-*", "LICENSE"), - ("Tcl", "tcl-8*", "license.terms"), - ("Tk", "tk-8*", "license.terms"), - ("Tix", "tix-*", "license.terms")): - txt += "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" % name - dirs = glob.glob(options.license_base + "/" +pat) - if not dirs: - raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: - raise ValueError, "Multiple copies of "+pat - dir = dirs[0] - with open(os.path.join(dir, file)) as fid: - txt += fid.read() + # shutil.copyfileobj(open("crtlicense.txt"), out) # We do not ship msvc runtime files + for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), + ("openssl", "openssl-*", "LICENSE"), + ("Tcl", "tcl-8*", "license.terms"), + ("Tk", "tk-8*", "license.terms"), + ("Tix", "tix-*", 
"license.terms")): + txt += "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" % name + dirs = glob.glob(options.license_base + "/" +pat) + if not dirs: + raise ValueError, "Could not find "+ options.license_base + "/" + pat + if len(dirs) > 2: + raise ValueError, "Multiple copies of "+pat + dir = dirs[0] + with open(os.path.join(dir, file)) as fid: + txt += fid.read() return txt if sys.platform == 'win32': diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ b/pypy/tool/release/test/test_package.py @@ -106,12 +106,21 @@ check(file2, 0644) check(pypy, 0755) -def _test_generate_license(): - from os.path import dirname, abspath +def test_generate_license(): + from os.path import dirname, abspath, join class Options(object): pass options = Options() basedir = dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))) - license = package.generate_license(str(basedir.join('LICENSE')), options) - assert 'bzlib' in license + options.no_tk = False + if sys.platform == 'win32': + # Following recommended build setup at + # http://doc.pypy.org/en/latest/windows.html#abridged-method-for-ojit-builds-using-visual-studio-2008 + options.license_base = dirname(basedir) + '/local' + else: + options.license_base = '/usr/share/doc' + license = package.generate_license(join(basedir,'LICENSE'), options) + assert 'bzip2' in license + assert 'openssl' in license + assert 'tcl' in license From noreply at buildbot.pypy.org Thu May 22 20:29:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 May 2014 20:29:52 +0200 (CEST) Subject: [pypy-commit] pypy packaging: accept positional arguments for backward compatability Message-ID: <20140522182952.968BE1C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71672:e058ca3ed85b Date: 2014-05-22 19:20 +0300 http://bitbucket.org/pypy/pypy/changeset/e058ca3ed85b/ Log: accept 
positional arguments for backward compatability diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -19,6 +19,7 @@ import py import fnmatch import subprocess +import glob if sys.version_info < (2,6): py.test.skip("requires 2.6 so far") @@ -49,11 +50,41 @@ os.system("chmod -R a+rX %s" % dirname) os.system("chmod -R g-w %s" % dirname) -def generate_license(base_file, options): +def generate_license_linux(base_file, options): + # We don't actually ship binaries with the pypy archive, + # so no extra license needed? with open(base_file) as fid: txt = fid.read() return txt +def generate_license_windows(base_file, options): + # Do as cpython does + with open(base_file) as fid: + txt = fid.read() + shutil.copyfileobj(open("crtlicense.txt"), out) + for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), + ("openssl", "openssl-*", "LICENSE"), + ("Tcl", "tcl-8*", "license.terms"), + ("Tk", "tk-8*", "license.terms"), + ("Tix", "tix-*", "license.terms")): + txt += "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" % name + dirs = glob.glob(options.license_base + "/" +pat) + if not dirs: + raise ValueError, "Could not find "+ options.license_base + "/" + pat + if len(dirs) > 2: + raise ValueError, "Multiple copies of "+pat + dir = dirs[0] + with open(os.path.join(dir, file)) as fid: + txt += fid.read() + return txt + +if sys.platform == 'win32': + generate_license = generate_license_windows +elif sys.platform == 'darwin': + generate_license = generate_license_linux +else: + generate_license = generate_license_linux + def create_cffi_import_libraries(pypy_c, options): modules = ['_sqlite3'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) @@ -65,18 +96,20 @@ try: subprocess.check_call([str(pypy_c), '-c', 'import ' + module]) except subprocess.CalledProcessError: - print >>sys.stderr, """Building %{0} bindings failed. 
+ print >>sys.stderr, """Building {0} bindings failed. You can either install development headers package or add --without-{0} option to skip packaging binary CFFI extension.""".format(module) raise MissingDependenciesError(module) -def package(basedir, options): +def create_package(basedir, options): name = options.name + if not name: + name = 'pypy-nightly' rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c basedir = py.path.local(basedir) - if override_pypy_c is None: + if not override_pypy_c: basename = 'pypy-c' if sys.platform == 'win32': basename += '.exe' @@ -169,7 +202,7 @@ shutil.copyfile(str(basedir.join('lib_pypy', file)), str(pypydir.join('lib_pypy', file))) license = generate_license(str(basedir.join('LICENSE')), options) - with open(pypydir.join('LICENSE'), 'w') as LICENSE: + with open(str(pypydir.join('LICENSE')), 'w') as LICENSE: LICENSE.write(license) # spdir = pypydir.ensure('site-packages', dir=True) @@ -224,14 +257,14 @@ raise OSError('"tar" returned exit status %r' % e) finally: os.chdir(old_dir) - if copy_to_dir is not None: - print "Copying %s to %s" % (archive, copy_to_dir) - shutil.copy(archive, str(copy_to_dir)) + if options.targetdir: + print "Copying %s to %s" % (archive, options.targetdir) + shutil.copy(archive, options.targetdir) else: print "Ready in %s" % (builddir,) return builddir # for tests -if __name__ == '__main__': +def package(*args): import argparse if sys.platform == 'win32': pypy_exe = 'pypy.exe' @@ -240,6 +273,8 @@ pypy_exe = 'pypy' license_base = '/usr/share/doc' parser = argparse.ArgumentParser() + args = list(args) + args[0] = str(args[0]) parser.add_argument('--without-tk', dest='no_tk', action='store_true', help='build and package the cffi tkinter module') parser.add_argument('--without-cffi', dest='no_cffi', action='store_true', @@ -248,12 +283,31 @@ help='do not strip the exe, making it ~10MB larger') parser.add_argument('--rename_pypy_c', dest='pypy_c', type=str, default=pypy_exe, 
help='target executable name, defaults to "pypy"') + parser.add_argument('--archive-name', dest='name', type=str, default='', + help='pypy-VER-PLATFORM') parser.add_argument('--license_base', type=str, default=license_base, help='where to start looking for third party upstream licensing info') parser.add_argument('--builddir', type=str, default='', help='tmp dir for packaging') - options = parser.parse_args() + parser.add_argument('--targetdir', type=str, default='', + help='destination dir for archive') + parser.add_argument('--override_pypy_c', type=str, default='', + help='use as pypy exe instead of pypy/goal/pypy-c') + # Positional arguments, for backward compatability with buldbots + parser.add_argument('extra_args', help='optional interface to positional arguments', nargs=argparse.REMAINDER, + metavar='[root-pypy-dir] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path]', + ) + options = parser.parse_args(args) + # Handle positional arguments, choke if both methods are used + for i,target, default in ([1, 'name', ''], [2, 'pypy_c', pypy_exe], + [3, 'targetdir', ''], [4,'override_pypy_c', '']): + if len(options.extra_args)>i: + if getattr(options, target) != default: + print 'positional argument',i,target,'already has value',getattr(options, target) + parser.print_help() + return + setattr(options, target, options.extra_args[i]) if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"): options.nostrip = True @@ -263,5 +317,9 @@ # The import actually creates the udir directory from rpython.tool.udir import udir options.builddir = udir.ensure("build", dir=True) - assert '/' not in options.rename_pypy_c - package(basedir, options) + assert '/' not in options.pypy_c + return create_package(basedir, options) + +if __name__ == '__main__': + import sys + create_package(*sys.argv[1:]) diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py --- a/pypy/tool/release/test/test_package.py +++ 
b/pypy/tool/release/test/test_package.py @@ -1,7 +1,7 @@ import py from pypy.conftest import pypydir -from pypy.tool.release import package +from pypy.tool.release import package, package from pypy.module.sys.version import CPYTHON_VERSION import tarfile, zipfile, sys @@ -74,7 +74,6 @@ pypy_c.remove() def test_with_zipfile_module(): - from pypy.tool.release import package prev = package.USE_ZIPFILE_MODULE try: package.USE_ZIPFILE_MODULE = True @@ -106,3 +105,13 @@ check(file1, 0644) check(file2, 0644) check(pypy, 0755) + +def _test_generate_license(): + from os.path import dirname, abspath + class Options(object): + pass + options = Options() + basedir = dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))) + license = package.generate_license(str(basedir.join('LICENSE')), options) + assert 'bzlib' in license + From noreply at buildbot.pypy.org Thu May 22 20:29:56 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 May 2014 20:29:56 +0200 (CEST) Subject: [pypy-commit] pypy packaging: append more bits to LICENSE file, invent something for gdbm Message-ID: <20140522182956.467D51C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71675:38f4f2c65b95 Date: 2014-05-22 21:10 +0300 http://bitbucket.org/pypy/pypy/changeset/38f4f2c65b95/ Log: append more bits to LICENSE file, invent something for gdbm diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -55,14 +55,15 @@ # so no extra license needed? 
with open(base_file) as fid: txt = fid.read() - searches = [("bzip2","libbz2-*", "copyright"), - ("openssl", "openssl*", "copyright"), + searches = [("bzip2","libbz2-*", "copyright", '---------'), + ("openssl", "openssl*", "copyright", 'LICENSE ISSUES'), ] if not options.no_tk: - searches += [("tk", "tk-dev", "copyright"), - ("tcl", "tcl-dev", "copyright")] - for name, pat, file in searches: - txt += "="*40 + searches += [("tk", "tk-dev", "copyright", "Copyright"), + ("tcl", "tcl-dev", "copyright", "Copyright")] + for name, pat, fname, first_line in searches: + txt += "License for '" + name + "'" + txt += '\n' + "="*(14 + len(name)) + '\n' txt += "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" % name dirs = glob.glob(options.license_base + "/" +pat) if not dirs: @@ -70,15 +71,30 @@ if len(dirs) > 2: raise ValueError, "Multiple copies of "+pat dir = dirs[0] - with open(os.path.join(dir, file)) as fid: - # Read up to the ---- dividing the packaging header from the actual - # copyright (bzip) or 'LICENSE ISSUES' for openssl + with open(os.path.join(dir, fname)) as fid: + # Read up to the line dividing the packaging header from the actual copyright for line in fid: - if (line.startswith('---------') or 'LICENSE ISSUES' in line): + if first_line in line: break txt += line for line in fid: txt += line + if len(line.strip())<1: + txt += '\n' + # Do something for gdbm, which is GPL + txt += '''\n\nLicenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and acknowledgements +for third-party software incorporated in the Python distribution. + +''' + txt += '''gdbm +---- + +The gdbm module includes code from gdbm.h, which is distributed under th terms +of the GPL license version 2 or any later version. 
+''' return txt def generate_license_windows(base_file, options): @@ -345,4 +361,4 @@ if __name__ == '__main__': import sys - create_package(*sys.argv[1:]) + package(*sys.argv[1:]) From noreply at buildbot.pypy.org Thu May 22 20:29:57 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 May 2014 20:29:57 +0200 (CEST) Subject: [pypy-commit] pypy packaging: typos Message-ID: <20140522182957.664871C02F3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71676:9cec6b2bd201 Date: 2014-05-22 21:29 +0300 http://bitbucket.org/pypy/pypy/changeset/9cec6b2bd201/ Log: typos diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -86,13 +86,13 @@ ======================================================= This section is an incomplete, but growing list of licenses and acknowledgements -for third-party software incorporated in the Python distribution. +for third-party software incorporated in the PyPy distribution. ''' txt += '''gdbm ---- -The gdbm module includes code from gdbm.h, which is distributed under th terms +The gdbm module includes code from gdbm.h, which is distributed under the terms of the GPL license version 2 or any later version. 
''' return txt From noreply at buildbot.pypy.org Thu May 22 23:37:36 2014 From: noreply at buildbot.pypy.org (ISF) Date: Thu, 22 May 2014 23:37:36 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Revert signal change in StackLocation Message-ID: <20140522213736.E17891C350E@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71677:5ad3b78714d1 Date: 2014-05-21 13:40 +0000 http://bitbucket.org/pypy/pypy/changeset/5ad3b78714d1/ Log: Revert signal change in StackLocation diff --git a/rpython/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py --- a/rpython/jit/backend/ppc/locations.py +++ b/rpython/jit/backend/ppc/locations.py @@ -137,16 +137,16 @@ return True def as_key(self): - return self.position + 10000 + return -self.position + 10000 def imm(val): return ImmLocation(val) def get_spp_offset(pos): if pos < 0: - return pos * WORD + return -pos * WORD else: - return (pos + 1) * WORD + return -(pos + 1) * WORD def get_fp_offset(base_ofs, position): return base_ofs + position From noreply at buildbot.pypy.org Thu May 22 23:37:38 2014 From: noreply at buildbot.pypy.org (ISF) Date: Thu, 22 May 2014 23:37:38 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Rename _teardown to teardown Message-ID: <20140522213738.355F11C350E@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71678:31b0dfd8cca9 Date: 2014-05-22 21:34 +0000 http://bitbucket.org/pypy/pypy/changeset/31b0dfd8cca9/ Log: Rename _teardown to teardown Consistent with the other backends diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -855,7 +855,7 @@ 'loop.asm') ops_offset = self.mc.ops_offset - self._teardown() + self.teardown() debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address %x to %x (bootstrap %x)" % ( @@ -925,7 
+925,7 @@ self._patch_sp_offset(sp_patch_location, rawstart) ops_offset = self.mc.ops_offset - self._teardown() + self.teardown() debug_start("jit-backend-addr") debug_print("bridge out of Guard %d has address %x to %x" % @@ -1009,7 +1009,7 @@ size += 1 return size - def _teardown(self): + def teardown(self): self.patch_list = None self.pending_guards = None self.current_clt = None From noreply at buildbot.pypy.org Thu May 22 23:37:39 2014 From: noreply at buildbot.pypy.org (ISF) Date: Thu, 22 May 2014 23:37:39 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Remove duplicated methods, use BaseAssembly in setup() Message-ID: <20140522213739.711D91C350E@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71679:089cd31fe54a Date: 2014-05-22 21:36 +0000 http://bitbucket.org/pypy/pypy/changeset/089cd31fe54a/ Log: Remove duplicated methods, use BaseAssembly in setup() diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -18,6 +18,8 @@ from rpython.jit.metainterp.history import ConstInt, BoxInt from rpython.jit.backend.llsupport import jitframe from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper +from rpython.jit.backend.llsupport.assembler import (DEBUG_COUNTER, debug_bridge, + BaseAssembler) from rpython.jit.backend.model import CompiledLoopToken from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.jit.metainterp.resoperation import rop, ResOperation @@ -691,55 +693,7 @@ return operations def setup_once(self): - gc_ll_descr = self.cpu.gc_ll_descr - gc_ll_descr.initialize() - self._build_wb_slowpath(False) - self._build_wb_slowpath(True) - if self.cpu.supports_floats: - self._build_wb_slowpath(False, withfloats=True) - self._build_wb_slowpath(True, withfloats=True) - self._build_propagate_exception_path() - if 
gc_ll_descr.get_malloc_slowpath_addr is not None: - self._build_malloc_slowpath() - self._build_stack_check_slowpath() - if gc_ll_descr.gcrootmap and gc_ll_descr.gcrootmap.is_shadow_stack: - self._build_release_gil(gc_ll_descr.gcrootmap) - self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) - self.exit_code_adr = self._gen_exit_path() - debug_start('jit-backend-counts') - self.set_debug(have_debug_prints()) - debug_stop('jit-backend-counts') - - def finish_once(self): - if self._debug: - debug_start('jit-backend-counts') - for i in range(len(self.loop_run_counters)): - struct = self.loop_run_counters[i] - if struct.type == 'l': - prefix = 'TargetToken(%d)' % struct.number - elif struct.type == 'b': - prefix = 'bridge ' + str(struct.number) - else: - prefix = 'entry ' + str(struct.number) - debug_print(prefix + ':' + str(struct.i)) - debug_stop('jit-backend-counts') - - # XXX: merge with x86 - def _register_counter(self, tp, number, token): - # YYY very minor leak -- we need the counters to stay alive - # forever, just because we want to report them at the end - # of the process - struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', - track_allocation=False) - struct.i = 0 - struct.type = tp - if tp == 'b' or tp == 'e': - struct.number = number - else: - assert token - struct.number = compute_unique_id(token) - self.loop_run_counters.append(struct) - return struct + BaseAssembler.setup_once(self) def _append_debugging_code(self, operations, tp, number, token): counter = self._register_counter(tp, number, token) From noreply at buildbot.pypy.org Fri May 23 02:27:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 23 May 2014 02:27:04 +0200 (CEST) Subject: [pypy-commit] pypy py3k: skip when the fsencoding can't handle this filename Message-ID: <20140523002704.45C5B1C3396@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71680:d4e74c26a617 Date: 2014-05-22 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/d4e74c26a617/ Log: 
skip when the fsencoding can't handle this filename diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -60,6 +60,12 @@ import _io import os path = os.path.join(self.tmpdir, '_pypy-日本') + try: + os.fsencode(path) + except UnicodeEncodeError: + import sys + skip("can't run this test with %s as filesystem encoding" % + sys.getfilesystemencoding()) exc = raises(IOError, _io.FileIO, path) expected = "[Errno 2] No such file or directory: %r" % path assert str(exc.value) == expected From noreply at buildbot.pypy.org Fri May 23 02:27:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 23 May 2014 02:27:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: also handle surrogates when hosted on a narrow build Message-ID: <20140523002705.83B821C3396@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71681:556155656b47 Date: 2014-05-22 17:26 -0700 http://bitbucket.org/pypy/pypy/changeset/556155656b47/ Log: also handle surrogates when hosted on a narrow build diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -11,6 +11,7 @@ else: MAXUNICODE = 0xffff +NARROW_HOST = not we_are_translated() and sys.maxunicode == 0xFFFF BYTEORDER = sys.byteorder # python 2.7 has a preview of py3k behavior, so those functions @@ -63,7 +64,7 @@ if MAXUNICODE > 0xFFFF: def code_to_unichr(code): - if not we_are_translated() and sys.maxunicode == 0xFFFF: + if NARROW_HOST: # Host CPython is narrow build, generate surrogates return unichr_returns_surrogate(code) else: @@ -334,7 +335,8 @@ ch2 = ord(s[pos]) # Check for low surrogate and combine the two to # form a UCS4 value - if ((allow_surrogates or MAXUNICODE < 65536) and + if ((allow_surrogates or MAXUNICODE < 65536 + or NARROW_HOST) and ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): ch3 = ((ch - 0xD800) << 10 | (ch2 - 
0xDC00)) + 0x10000 assert ch3 >= 0 @@ -1342,8 +1344,7 @@ # The following logic is enabled only if MAXUNICODE == 0xffff, or # for testing on top of a host Python where sys.maxunicode == 0xffff - if ((MAXUNICODE < 65536 or - (not we_are_translated() and sys.maxunicode < 65536)) + if ((MAXUNICODE < 65536 or NARROW_HOST) and 0xD800 <= oc < 0xDC00 and pos + 1 < size): # Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes pos += 1 From noreply at buildbot.pypy.org Fri May 23 09:54:31 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 23 May 2014 09:54:31 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: some clarifications and adding the second fastpath to stm_write_card() Message-ID: <20140523075431.033051C003C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1242:008b3c48e38b Date: 2014-05-23 09:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/008b3c48e38b/ Log: some clarifications and adding the second fastpath to stm_write_card() diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,7 +40,7 @@ #endif } -static void _stm_mark_card(object_t *obj, uintptr_t card_index) +void _stm_mark_card(object_t *obj, uintptr_t card_index) { assert(card_index > 0); @@ -115,12 +115,27 @@ assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( + (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)))); + + uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + assert(base_lock_idx < sizeof(write_locks)); if (!(obj->stm_flags & GCFLAG_HAS_CARDS)) card_index = 0; /* assume no cards */ - assert(IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( - (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)))); + /* if card_index and obj->stm_flags & CARDS_SET: + directly 
mark the card of obj at card_index + return (no STM part needed) + -> see stmgc.h */ + /* if CARDS_SET, we entered here at least once, so we own the write_lock + OR this is an overflow object and the write_lock is not owned */ + OPT_ASSERT( + IMPLY(obj->stm_flags & GCFLAG_CARDS_SET, + (!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj) && write_locks[base_lock_idx] == lock_num) + || (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj) && write_locks[base_lock_idx] == 0) + )); if (_stm_write_slowpath_overflow_objs(obj, card_index)) return; @@ -136,9 +151,7 @@ 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. */ - uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); - uint8_t lock_num = STM_PSEGMENT->write_lock_num; - assert(base_lock_idx < sizeof(write_locks)); + retry: if (write_locks[base_lock_idx] == 0) { /* A lock to prevent reading garbage from diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -233,8 +233,8 @@ #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) -#define IS_OVERFLOW_OBJ(pseg, obj) ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \ - == pseg->overflow_number) +#define IS_OVERFLOW_OBJ(pseg, obj) (((obj)->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \ + == (pseg)->overflow_number) static inline uintptr_t get_card_index(uintptr_t byte_offset) { return (byte_offset / CARD_SIZE) + 1; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -108,6 +108,7 @@ but it's not exposed to C code so far */ void _stm_write_slowpath(object_t *); void _stm_write_slowpath_card(object_t *, uintptr_t); +void _stm_mark_card(object_t *obj, uintptr_t card_index); object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); @@ -221,16 +222,23 @@ } /* The following is a GC-optimized barrier that works on the granularity - of 
CARD_SIZE. It can only be used on objects one called stm_use_cards() - on. It has the same purpose as stm_write() for TM. + of CARD_SIZE. It can only be used on objects one any object, but only + helps with those that were internally marked with GCFLAG_HAS_CARDS + It has the same purpose as stm_write() for TM. 'index' is the byte-offset into the object divided by _STM_CARD_SIZE plus 1: (offset // CARD_SIZE) + 1 */ __attribute__((always_inline)) static inline void stm_write_card(object_t *obj, uintptr_t index) { - if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) - _stm_write_slowpath_card(obj, index); + if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) { + if (LIKELY((obj->stm_flags & _STM_GCFLAG_CARDS_SET) != 0)) { + /* XXX: check how well clang optimizes this */ + _stm_mark_card(obj, index); + } else { + _stm_write_slowpath_card(obj, index); + } + } } /* Must be provided by the user of this library. From noreply at buildbot.pypy.org Fri May 23 09:57:09 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 23 May 2014 09:57:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix translation. Message-ID: <20140523075709.12BEF1C003C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71682:94e75ac81672 Date: 2014-05-23 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/94e75ac81672/ Log: Fix translation. 
diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -3,7 +3,6 @@ from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray -from pypy.objspace.std.memoryobject import _buffer_setitem from rpython.rlib.buffer import Buffer from rpython.rtyper.annlowlevel import llstr @@ -43,8 +42,6 @@ copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) -# Override the typedef to narrow down the interface that's exposed to app-level - class MiniBuffer(W_Root): def __init__(self, buffer, keepalive=None): self.buffer = buffer @@ -65,7 +62,18 @@ return space.wrapbytes(res) def descr_setitem(self, space, w_index, w_newstring): - _buffer_setitem(space, self.buffer, w_index, w_newstring) + start, stop, step, size = space.decode_index4(w_index, + self.buffer.getlength()) + if step not in (0, 1): + raise oefmt(space.w_NotImplementedError, "") + value = space.buffer_w(w_newstring, space.BUF_CONTIG_RO) + if value.getlength() != size: + raise oefmt(space.w_ValueError, + "cannot modify size of memoryview object") + if step == 0: # index only + self.buffer.setitem(start, value.getitem(0)) + elif step == 1: + self.buffer.setslice(start, value.as_str()) MiniBuffer.typedef = TypeDef( From noreply at buildbot.pypy.org Fri May 23 11:25:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 May 2014 11:25:30 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Close this branch head as not giving the best results Message-ID: <20140523092530.8F71C1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71683:b9b7b537aefb Date: 2014-05-21 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/b9b7b537aefb/ Log: Close this branch head as not giving the best results From noreply at 
buildbot.pypy.org Fri May 23 11:25:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 May 2014 11:25:31 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: Try with the shadowstack inlined into the regular stack Message-ID: <20140523092531.C2D4C1C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71684:27baaf212590 Date: 2014-05-23 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/27baaf212590/ Log: Try with the shadowstack inlined into the regular stack diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Block, Link, SpaceOperation +from rpython.flowspace.model import Block, Link, Constant, SpaceOperation from rpython.annotator import model as annmodel from rpython.translator.unsimplify import varoftype, copyvar from rpython.translator.backendopt.ssa import SSA_to_SSI @@ -26,39 +26,42 @@ def transform_graph(self, graph): self._transforming_graph = graph - self._ss_graph_marker = None + self._ss_graph_marker_op = None super(ShadowStackFrameworkGCTransformer, self).transform_graph(graph) - del self._ss_graph_marker + del self._ss_graph_marker_op del self._transforming_graph def sanitize_graph(self, graph): SSA_to_SSI(graph, self.translator.annotator) - def ensure_ss_graph_marker(self): - if self._ss_graph_marker is None: + def ensure_ss_graph_marker(self, count): + c_count = Constant(count, lltype.Signed) + if self._ss_graph_marker_op is None: graph = self._transforming_graph inputargs = [copyvar(self.translator.annotator, v) for v in graph.startblock.inputargs] hblock = Block(inputargs) v_marker = varoftype(self.RPY_SHADOWSTACK_PTR) - hblock.operations.append(SpaceOperation('gc_ss_graph_marker', - [], v_marker)) + op = SpaceOperation('gc_ss_graph_marker', [c_count], v_marker) + hblock.operations.append(op) 
hblock.closeblock(Link(inputargs, graph.startblock)) graph.startblock = hblock - self._ss_graph_marker = v_marker - return self._ss_graph_marker + self._ss_graph_marker_op = op + elif self._ss_graph_marker_op.args[0].value < count: + self._ss_graph_marker_op.args[0] = c_count + return self._ss_graph_marker_op.result def push_roots(self, hop, keep_current_args=False): livevars = self.get_livevars_for_roots(hop, keep_current_args) self.num_pushs += len(livevars) - v_marker = self.ensure_ss_graph_marker() + v_marker = self.ensure_ss_graph_marker(len(livevars)) hop.genop("gc_ss_store", [v_marker] + livevars) return livevars def pop_roots(self, hop, livevars): # for moving collectors, reload the roots into the local variables if self.gcdata.gc.moving_gc and livevars: - v_marker = self.ensure_ss_graph_marker() + v_marker = self.ensure_ss_graph_marker(len(livevars)) hop.genop("gc_ss_reload", [v_marker] + livevars) @@ -69,15 +72,20 @@ gcdata = self.gcdata root_iterator = get_root_iterator(gctransformer) - def walk_stack_root(callback, start, end): - root_iterator.setcontext(NonConstant(llmemory.NULL)) + def walk_stack_root(callback, addr): + #root_iterator.setcontext(NonConstant(llmemory.NULL)) gc = self.gc - addr = end while True: - addr = root_iterator.nextleft(gc, start, addr) + addr += 2 + ll_assert(not (llmemory.cast_adr_to_int(addr) & (sizeofaddr-1)), + "in shadowstack: misaligned") if addr == llmemory.NULL: - return - callback(gc, addr) + break + while (addr.signed[0] & 2) == 0: + if gc.points_to_valid_gc_object(addr): + callback(gc, addr) + addr -= sizeofaddr + addr = addr.address[0] self.rootstackhook = walk_stack_root self.shadow_stack_pool = ShadowStackPool(gcdata) @@ -100,8 +108,7 @@ def walk_stack_roots(self, collect_stack_root): llop.gc_stack_top(lltype.Void) gcdata = self.gcdata - self.rootstackhook(collect_stack_root, - gcdata.root_stack_base, gcdata.root_stack_top) + self.rootstackhook(collect_stack_root, gcdata.root_stack_top) def 
need_thread_support(self, gctransformer, getfn): from rpython.rlib import rthread # xxx fish @@ -276,11 +283,11 @@ #MAX = 20 not implemented yet def __init__(self, gcdata): - self.unused_full_stack = llmemory.NULL + #self.unused_full_stack = llmemory.NULL self.gcdata = gcdata def initial_setup(self): - self._prepare_unused_stack() + #self._prepare_unused_stack() self.start_fresh_new_state() def allocate(self, SHADOWSTACKREF): @@ -294,7 +301,8 @@ forget_current_state(), and then call restore_state_from() or start_fresh_new_state(). """ - self._prepare_unused_stack() + raise MemoryError + #self._prepare_unused_stack() shadowstackref.base = self.gcdata.root_stack_base shadowstackref.top = self.gcdata.root_stack_top shadowstackref.context = ncontext @@ -312,14 +320,15 @@ self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore def forget_current_state(self): - ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top, - "forget_current_state: shadowstack not empty!") - if self.unused_full_stack: - llmemory.raw_free(self.unused_full_stack) - self.unused_full_stack = self.gcdata.root_stack_base + #ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top, + # "forget_current_state: shadowstack not empty!") + #if self.unused_full_stack: + # llmemory.raw_free(self.unused_full_stack) + #self.unused_full_stack = self.gcdata.root_stack_base self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore def restore_state_from(self, shadowstackref): + raise MemoryError ll_assert(bool(shadowstackref.base), "empty shadowstackref!") ll_assert(shadowstackref.base <= shadowstackref.top, "restore_state_from: broken shadowstack") @@ -328,9 +337,11 @@ self._cleanup(shadowstackref) def start_fresh_new_state(self): - self.gcdata.root_stack_base = self.unused_full_stack - self.gcdata.root_stack_top = self.unused_full_stack - self.unused_full_stack = llmemory.NULL + #self.gcdata.root_stack_base = self.unused_full_stack + #self.gcdata.root_stack_top 
= self.unused_full_stack + #self.unused_full_stack = llmemory.NULL + self.gcdata.root_stack_top = llmemory.NULL + self.gcdata.root_stack_top -= 2 llop.gc_stack_bottom(lltype.Void) def _cleanup(self, shadowstackref): @@ -338,12 +349,12 @@ shadowstackref.top = llmemory.NULL shadowstackref.context = llmemory.NULL - def _prepare_unused_stack(self): - if self.unused_full_stack == llmemory.NULL: - root_stack_size = sizeofaddr * self.root_stack_depth - self.unused_full_stack = llmemory.raw_malloc(root_stack_size) - if self.unused_full_stack == llmemory.NULL: - raise MemoryError + ## def _prepare_unused_stack(self): + ## if self.unused_full_stack == llmemory.NULL: + ## root_stack_size = sizeofaddr * self.root_stack_depth + ## self.unused_full_stack = llmemory.raw_malloc(root_stack_size) + ## if self.unused_full_stack == llmemory.NULL: + ## raise MemoryError def get_root_iterator(gctransformer): @@ -354,8 +365,10 @@ return True def setcontext(self, context): pass - def nextleft(self, gc, start, addr): - while addr != start: + def nextleft(self, gc, addr): + assert llmemory.cast_adr_to_int(addr) & (WORD-1) == (WORD-2) + xxxxxxx + while addr != ROOT_STACK_STOP: addr -= sizeofaddr if gc.points_to_valid_gc_object(addr): return addr @@ -366,6 +379,7 @@ def get_shadowstackref(root_walker, gctransformer): + raise Exception("XXX") if hasattr(gctransformer, '_SHADOWSTACKREF'): return gctransformer._SHADOWSTACKREF diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -444,14 +444,16 @@ return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) def OP_GC_SS_GRAPH_MARKER(self, funcgen, op): - return '%s = rpy_shadowstack;' % funcgen.expr(op.result) + marker = funcgen.expr(op.result) + count = op.args[0].value + return 'RPY_SS_GRAPH_MARKER(%s, %d);' % (marker, count) def OP_GC_SS_STORE(self, funcgen, op): marker = funcgen.expr(op.args[0]) lines = [] for i, v in enumerate(op.args[1:]): - 
lines.append('%s[%d].s = %s;' % (marker, i, funcgen.expr(v))) - lines.append('rpy_shadowstack = %s + %d;' % (marker, len(op.args) - 1)) + lines.append('%s[%d].s = %s;' % (marker, i + 1, funcgen.expr(v))) + lines.append('RPY_SS_STORED(%s, %d);' % (marker, len(op.args) - 1)) return '\n'.join(lines) def OP_GC_SS_RELOAD(self, funcgen, op): @@ -463,7 +465,7 @@ funcgen.expr(v), cdecl(typename, ''), marker, - i)) + i + 1)) if isinstance(v, Constant): lines[-1] = '/* %s */' % lines[-1] lines.reverse() diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -207,15 +207,25 @@ struct rpy_shadowstack_s { void *s; }; #ifdef RPY_SHADOWSTACK_REG -register struct rpy_shadowstack_s *rpy_shadowstack asm(RPY_SHADOWSTACK_REG); +register void *rpy_shadowstack asm(RPY_SHADOWSTACK_REG); #else -extern struct rpy_shadowstack_s *rpy_shadowstack; +extern void *rpy_shadowstack; #endif +#define RPY_SS_GRAPH_MARKER(marker, count) \ + ; \ + struct rpy_shadowstack_s a##marker[count + 1]; \ + a##marker[0].s = rpy_shadowstack; \ + marker = a##marker + +#define RPY_SS_STORED(marker, count) \ + rpy_shadowstack = count > 0 ? (char *)(marker + count) - 2 \ + : marker[0].s + static inline void pypy_asm_stack_bottom(void) { void *s = pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top; - rpy_shadowstack = (struct rpy_shadowstack_s *)s; + rpy_shadowstack = s; } static inline void pypy_asm_stack_top(void) @@ -232,10 +242,8 @@ else { \ r = NULL; \ } -#define OP_SETFIELD_EXC_TYPE(x, r) \ - rpy_shadowstack = (x) ? \ - (struct rpy_shadowstack_s *)(((char *)x) + 1) \ - : NULL +#define OP_SETFIELD_EXC_TYPE(x, r) \ + rpy_shadowstack = (x) ? 
((char *)(x)) + 1 : NULL #endif From noreply at buildbot.pypy.org Fri May 23 11:25:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 May 2014 11:25:33 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: dummy merge of the closed head Message-ID: <20140523092533.01B631C003C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71685:50985ed490bf Date: 2014-05-23 11:24 +0200 http://bitbucket.org/pypy/pypy/changeset/50985ed490bf/ Log: dummy merge of the closed head From noreply at buildbot.pypy.org Fri May 23 12:03:09 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 23 May 2014 12:03:09 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: possible change in API (closer to pypy, but needs more nasty callbacks) Message-ID: <20140523100309.955AA1D27E0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1243:ea910eddec2e Date: 2014-05-23 12:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/ea910eddec2e/ Log: possible change in API (closer to pypy, but needs more nasty callbacks) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,9 +40,10 @@ #endif } -void _stm_mark_card(object_t *obj, uintptr_t card_index) +__attribute__((always_inline)) +void _stm_mark_card(object_t *obj, uintptr_t index) { - assert(card_index > 0); + assert(index != -1); assert(obj->stm_flags & GCFLAG_HAS_CARDS); assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested */ @@ -54,8 +55,6 @@ /* we need at least one lock in addition to the STM-reserved object write-lock */ #endif - dprintf(("mark %p card %lu with %d\n", obj, card_index, CARD_MARKED)); - if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { /* not yet in the list */ if (STM_PSEGMENT->old_objects_with_cards) { @@ -69,24 +68,27 @@ /* Just acquire the corresponding lock for the next minor_collection to know what may have changed. We already own the object here or it is an overflow obj. 
*/ - uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj) + card_index; + uintptr_t card_lock_idx = get_write_lock_idx((uintptr_t)obj); + card_lock_idx += get_index_to_card_index(index); assert(write_locks[get_write_lock_idx((uintptr_t)obj)] == 0 /* overflow obj */ || write_locks[get_write_lock_idx((uintptr_t)obj)] == STM_PSEGMENT->write_lock_num); assert(get_write_lock_idx((uintptr_t)obj) != card_lock_idx); + dprintf(("mark %p index %lu, card:%lu with %d\n", + obj, index, get_index_to_card_index(index), CARD_MARKED)); if (write_locks[card_lock_idx] != CARD_MARKED) write_locks[card_lock_idx] = CARD_MARKED; } -static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t card_index) +static bool _stm_write_slowpath_overflow_objs(object_t *obj, uintptr_t index) { /* is this an object from the same transaction, outside the nursery? */ if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); - dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); + dprintf_test(("write_slowpath %p -> ovf obj_to_nurs, index:%lu\n", obj, index)); - if (!card_index) { + if (index == -1) { /* no card to be marked, don't call again until next collection */ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); @@ -94,7 +96,7 @@ /* don't remove GCFLAG_WRITE_BARRIER because we need to be here for every card to mark */ assert(STM_PSEGMENT->old_objects_with_cards); - _stm_mark_card(obj, card_index); + _stm_mark_card(obj, index); } /* We don't need to do anything in the STM part of the WB slowpath: */ @@ -107,26 +109,29 @@ void _stm_write_slowpath(object_t *obj) { - _stm_write_slowpath_card(obj, 0); + _stm_write_slowpath_card(obj, -1); } -void _stm_write_slowpath_card(object_t *obj, uintptr_t card_index) +void _stm_write_slowpath_card(object_t *obj, uintptr_t index) { assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & 
GCFLAG_WRITE_BARRIER); - assert(IMPLY(card_index, (card_index - 1) * CARD_SIZE < stmcb_size_rounded_up( - (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)))); + assert(IMPLY(index != -1, index < stmcb_size_rounded_up( + (struct object_s*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)))); uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); uint8_t lock_num = STM_PSEGMENT->write_lock_num; + bool mark_cards = index != -1; + assert(base_lock_idx < sizeof(write_locks)); + if (!(obj->stm_flags & GCFLAG_HAS_CARDS)) { + index = -1; + mark_cards = false; /* assume no cards */ + } - if (!(obj->stm_flags & GCFLAG_HAS_CARDS)) - card_index = 0; /* assume no cards */ - - /* if card_index and obj->stm_flags & CARDS_SET: - directly mark the card of obj at card_index + /* if mark_cards and obj->stm_flags & CARDS_SET: + directly mark the card of obj at index return (no STM part needed) -> see stmgc.h */ /* if CARDS_SET, we entered here at least once, so we own the write_lock @@ -137,7 +142,7 @@ || (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj) && write_locks[base_lock_idx] == 0) )); - if (_stm_write_slowpath_overflow_objs(obj, card_index)) + if (_stm_write_slowpath_overflow_objs(obj, index)) return; /* do a read-barrier now. Note that this must occur before the @@ -233,7 +238,7 @@ /* A common case for write_locks[] that was either 0 or lock_num: we need to add the object to the appropriate list if there is one. 
*/ - if (!card_index) { + if (!mark_cards) { if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); @@ -246,7 +251,7 @@ } else { /* don't remove WRITE_BARRIER */ - _stm_mark_card(obj, card_index); + _stm_mark_card(obj, index); } /* for sanity, check again that all other segment copies of this @@ -532,7 +537,7 @@ uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; - uintptr_t last_card_index = get_card_index(obj_size - 1); + uintptr_t last_card_index = get_index_to_card_index(obj_size - 1); /* max valid index */ long i, myself = STM_SEGMENT->segment_num; /* simple heuristic to check if probably the whole object is @@ -554,10 +559,8 @@ try yet to use page_copy() or otherwise take into account privatization of pages (except _has_private_page_in_range) */ uintptr_t start = 0; - uintptr_t copy_size = 0; while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; - uintptr_t card_byte_offset = get_card_byte_offset(card_index); uint8_t card_value = write_locks[card_lock_idx]; OPT_ASSERT(card_value != CARD_MARKED); /* always only MARKED_OLD or CLEAR */ @@ -566,14 +569,8 @@ write_locks[card_lock_idx] = CARD_CLEAR; if (start == 0) { /* first marked card */ - start = (uintptr_t)obj + card_byte_offset; - } - - copy_size += CARD_SIZE; - - if ((start - (uintptr_t)obj) + copy_size > obj_size) { - /* don't copy over the object's bounds */ - copy_size = obj_size - (start - (uintptr_t)obj); + start = (uintptr_t)obj + stmcb_index_to_byte_offset( + realobj, get_card_index_to_index(card_index)); } } @@ -581,7 +578,17 @@ && (card_value != CARD_MARKED_OLD /* found non-marked card */ || card_index == last_card_index)) { /* this is the last card */ /* do the copying: */ - //dprintf(("copy %lu bytes\n", copy_size)); + uintptr_t copy_size; + + uintptr_t next_card_offset = 
stmcb_index_to_byte_offset( + realobj, get_card_index_to_index(card_index + 1)); + + if (next_card_offset > obj_size) + next_card_offset = obj_size; + + copy_size = next_card_offset - (start - (uintptr_t)obj); + + /* dprintf(("copy %lu bytes\n", copy_size)); */ /* since we have marked cards, at least one page here must be private */ assert(_has_private_page_in_range(myself, start, copy_size)); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -236,11 +236,11 @@ #define IS_OVERFLOW_OBJ(pseg, obj) (((obj)->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \ == (pseg)->overflow_number) -static inline uintptr_t get_card_index(uintptr_t byte_offset) { - return (byte_offset / CARD_SIZE) + 1; +static inline uintptr_t get_index_to_card_index(uintptr_t index) { + return (index / CARD_SIZE) + 1; } -static inline uintptr_t get_card_byte_offset(uintptr_t card_index) { +static inline uintptr_t get_card_index_to_index(uintptr_t card_index) { return (card_index - 1) * CARD_SIZE; } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -197,7 +197,7 @@ uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; - uintptr_t last_card_index = get_card_index(size - 1); + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS_MAX || write_locks[first_card_index] == 255); /* see gcpage.c */ @@ -256,7 +256,7 @@ uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; - uintptr_t last_card_index = get_card_index(size - 1); + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS || write_locks[first_card_index] == 255); /* see gcpage.c */ @@ -281,39 +281,51 @@ #pragma pop_macro("STM_PSEGMENT") } -static __thread object_t *_card_base_obj; -static void minor_trace_if_young_cards(object_t 
**pobj) -{ - /* XXX: add a specialised stmcb_trace_cards() that - also gives the obj-base */ - assert(_card_base_obj); - uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)_card_base_obj); - uintptr_t card_lock_idx = base_lock_idx + get_card_index( - (uintptr_t)((char*)pobj - STM_SEGMENT->segment_base) - (uintptr_t)_card_base_obj); - - if (write_locks[card_lock_idx] == CARD_MARKED) { - dprintf(("minor_trace_if_young_cards: trace %p\n", *pobj)); - minor_trace_if_young(pobj); - } -} static void _trace_card_object(object_t *obj) { - /* XXX HACK XXX: */ - _card_base_obj = obj; assert(!_is_in_nursery(obj)); assert(obj->stm_flags & GCFLAG_CARDS_SET); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); dprintf(("_trace_card_object(%p)\n", obj)); - - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young_cards); - bool obj_is_overflow = IS_OVERFLOW_OBJ(STM_PSEGMENT, obj); uint8_t mark_value = obj_is_overflow ? CARD_CLEAR : CARD_MARKED_OLD; - _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), - obj, mark_value, false); /* mark marked */ + + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS_MAX + || write_locks[first_card_index] == 255); /* see gcpage.c */ + + /* XXX: merge ranges */ + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + if (write_locks[card_lock_idx] == CARD_MARKED) { + /* clear or set to old: */ + write_locks[card_lock_idx] = mark_value; + + uintptr_t start = get_card_index_to_index(card_index); + uintptr_t stop = get_card_index_to_index(card_index + 1); + + dprintf(("trace_cards on %p with 
start:%lu stop:%lu\n", + obj, start, stop)); + stmcb_trace_cards(realobj, &minor_trace_if_young, + start, stop); + + } + + /* all cards should be cleared on overflow objs */ + assert(IMPLY(obj_is_overflow, + write_locks[card_lock_idx] == CARD_CLEAR)); + + card_index++; + } + obj->stm_flags &= ~GCFLAG_CARDS_SET; } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -225,8 +225,7 @@ of CARD_SIZE. It can only be used on objects one any object, but only helps with those that were internally marked with GCFLAG_HAS_CARDS It has the same purpose as stm_write() for TM. - 'index' is the byte-offset into the object divided by _STM_CARD_SIZE - plus 1: (offset // CARD_SIZE) + 1 + 'index' can be anything < size of the object */ __attribute__((always_inline)) static inline void stm_write_card(object_t *obj, uintptr_t index) @@ -251,6 +250,12 @@ */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +extern void stmcb_trace_cards(struct object_s *, void (object_t **), + uintptr_t start, uintptr_t stop); +/* needs to work with index > any valid index (can just return + object's size then) */ +extern uintptr_t stmcb_index_to_byte_offset(struct object_s *, + uintptr_t index); extern void stmcb_commit_soon(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -76,7 +76,6 @@ uint32_t _get_type_id(object_t *obj); void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); -uintptr_t _index_to_card_index(object_t *obj, int n); void _set_weakref(object_t *obj, object_t *v); object_t* _get_weakref(object_t *obj); @@ -268,16 +267,6 @@ return *field; } -uintptr_t _index_to_card_index(object_t *obj, int n) -{ - long nrefs = (long)((myobj_t*)obj)->type_id - 421420; - assert(n < nrefs); - - stm_char *field_addr = NULL; - field_addr += SIZEOF_MYOBJ; /* header */ - field_addr += n * sizeof(void*); /* field */ - 
return ((uintptr_t)field_addr / _STM_CARD_SIZE) + 1; -} ssize_t stmcb_size_rounded_up(struct object_s *obj) { @@ -312,6 +301,32 @@ } } +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + int i; + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 421420) { + /* basic case: no references */ + return; + } + + for (i=start; (i < myobj->type_id - 421420) && (i < stop); i++) { + object_t **ref = ((object_t **)(myobj + 1)) + i; + visit(ref); + } +} + +uintptr_t stmcb_index_to_byte_offset(struct object_s *obj, uintptr_t index) +{ + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 421420) { + /* basic case: no references */ + return sizeof(struct myobj_s) + index; + } + return sizeof(struct myobj_s) + index * sizeof(object_t*); +} + void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) { STM_PUSH_MARKER(*tl, onum, ob); @@ -358,8 +373,6 @@ class EmptyStack(Exception): pass -def byte_offset_to_card_index(offset): - return (offset // CARD_SIZE) + 1 def is_in_nursery(o): return lib.stm_can_move(o) @@ -402,7 +415,7 @@ def stm_set_ref(obj, idx, ref, use_cards=False): if use_cards: - stm_write_card(obj, lib._index_to_card_index(obj, idx)) + stm_write_card(obj, idx) else: stm_write(obj) lib._set_ptr(obj, idx, ref) @@ -414,8 +427,7 @@ def stm_set_char(obj, c, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) if use_cards: - index = byte_offset_to_card_index(offset) - stm_write_card(obj, index) + stm_write_card(obj, offset) else: stm_write(obj) stm_get_real_address(obj)[offset] = c diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -55,41 +55,33 @@ o = stm_allocate_old_refs(200) self.start_transaction() p = stm_allocate(64) - d = stm_allocate(64) stm_set_ref(o, 199, p, True) # without a write-barrier: - lib._set_ptr(o, 0, d) + lib._set_ptr(o, 0, 
ffi.cast("object_t*", -1)) self.push_root(o) stm_minor_collect() o = self.pop_root() + lib._set_ptr(o, 0, ffi.NULL) + pn = stm_get_ref(o, 199) assert not is_in_nursery(pn) assert pn != p - # d was not traced! - dn = stm_get_ref(o, 0) - assert is_in_nursery(dn) - assert dn == d - assert not stm_was_written(o) stm_write_card(o, 2) assert stm_was_written_card(o) # card cleared after last collection, # so no retrace of index 199: - d2 = stm_allocate(64) + # without a write-barrier: - lib._set_ptr(o, 199, d2) + lib._set_ptr(o, 199, ffi.cast("object_t*", -1)) self.push_root(o) stm_minor_collect() o = self.pop_root() - # d2 was not traced! - dn = stm_get_ref(o, 199) - assert is_in_nursery(dn) - assert dn == d2 def test_nursery2(self): o = stm_allocate_old_refs(200) @@ -111,16 +103,16 @@ assert not is_in_nursery(stm_get_ref(o, 100)) def test_nursery3(self): - o = stm_allocate_old_refs(200) + o = stm_allocate_old_refs(2000) self.start_transaction() stm_minor_collect() p = stm_allocate(64) d = stm_allocate(64) - e = stm_allocate(64) - stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1999, p, True) stm_set_ref(o, 1, d, True) - lib._set_ptr(o, 100, e) # no card marked! 
+ + lib._set_ptr(o, 1000, ffi.cast("object_t*", -1)) assert not stm_was_written(o) assert stm_was_written_card(o) @@ -129,9 +121,9 @@ stm_minor_collect() o = self.pop_root() - assert not is_in_nursery(stm_get_ref(o, 199)) + assert not is_in_nursery(stm_get_ref(o, 1999)) assert not is_in_nursery(stm_get_ref(o, 1)) - assert stm_get_ref(o, 100) == e # not traced + def test_abort_cleanup(self): o = stm_allocate_old_refs(200) From noreply at buildbot.pypy.org Fri May 23 14:17:29 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 23 May 2014 14:17:29 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: add stmcb_should_use_cards Message-ID: <20140523121729.9C6281C06D8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1244:1d1ac9cd4297 Date: 2014-05-23 14:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/1d1ac9cd4297/ Log: add stmcb_should_use_cards diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -579,9 +579,16 @@ || card_index == last_card_index)) { /* this is the last card */ /* do the copying: */ uintptr_t copy_size; + uintptr_t next_card_offset; + uintptr_t next_card = card_index; - uintptr_t next_card_offset = stmcb_index_to_byte_offset( - realobj, get_card_index_to_index(card_index + 1)); + if (card_value == CARD_MARKED_OLD) { + /* card_index is the last card of the object, but we need + to go one further to get the right offset */ + next_card++; + } + next_card_offset = stmcb_index_to_byte_offset( + realobj, get_card_index_to_index(next_card)); if (next_card_offset > obj_size) next_card_offset = obj_size; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -111,7 +111,7 @@ return addr; } -object_t *_stm_allocate_old(ssize_t size_rounded_up) +object_t *_stm_allocate_old(ssize_t size_rounded_up, long use_cards) { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ char *p = allocate_outside_nursery_large(size_rounded_up); @@ 
-119,7 +119,7 @@ object_t *o = (object_t *)(p - stm_object_pages); o->stm_flags = GCFLAG_WRITE_BARRIER; - if (size_rounded_up > CARD_SIZE) + if (use_cards && size_rounded_up > CARD_SIZE) o->stm_flags |= GCFLAG_HAS_CARDS; if (testing_prebuilt_objs == NULL) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -65,6 +65,8 @@ object_t *obj = *pobj; object_t *nobj; uintptr_t nobj_sync_now; + char *realobj; + size_t size; if (obj == NULL) return; @@ -75,8 +77,6 @@ to GCWORD_MOVED. In that case, the forwarding location, i.e. where the object moved to, is stored in the second word in 'obj'. */ object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; - char *realobj; - size_t size; if (obj->stm_flags & GCFLAG_HAS_SHADOW) { /* ^^ the single check above detects both already-moved objects @@ -114,7 +114,7 @@ copy_large_object:; char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); - if (size > CARD_SIZE) + if (size > CARD_SIZE && stmcb_should_use_cards((struct object_s*)realnobj)) nobj->stm_flags |= GCFLAG_HAS_CARDS; nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; @@ -141,6 +141,11 @@ nobj = obj; tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)nobj); nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; + + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size = stmcb_size_rounded_up((struct object_s *)realobj); + if (size > CARD_SIZE && stmcb_should_use_cards((struct object_s*)realobj)) + nobj->stm_flags |= GCFLAG_HAS_CARDS; } /* Set the overflow_number if nedeed */ @@ -655,9 +660,6 @@ char *result = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)(result - stm_object_pages); - if (size_rounded_up > CARD_SIZE) - o->stm_flags |= GCFLAG_HAS_CARDS; - tree_insert(STM_PSEGMENT->young_outside_nursery, (uintptr_t)o, 0); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); @@ -759,8 +761,6 @@ memcpy(realnobj, 
realobj, size); obj->stm_flags |= GCFLAG_HAS_SHADOW; - if (size > CARD_SIZE) /* probably not necessary */ - nobj->stm_flags |= GCFLAG_HAS_CARDS; tree_insert(STM_PSEGMENT->nursery_objects_shadows, (uintptr_t)obj, (uintptr_t)nobj); diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -29,7 +29,7 @@ /* We need to make a copy of this object. The extra "long" is for the prebuilt hash. */ size_t size = stmcb_size_rounded_up(obj); - object_t *nobj = _stm_allocate_old(size + sizeof(long)); + object_t *nobj = _stm_allocate_old(size + sizeof(long), 0); /* Copy the object */ char *realnobj = REAL_ADDRESS(stm_object_pages, nobj); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -116,7 +116,7 @@ void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ -object_t *_stm_allocate_old(ssize_t size_rounded_up); +object_t *_stm_allocate_old(ssize_t size_rounded_up, long use_cards); char *_stm_real_address(object_t *o); #ifdef STM_TESTS #include @@ -256,6 +256,7 @@ object's size then) */ extern uintptr_t stmcb_index_to_byte_offset(struct object_s *, uintptr_t index); +extern long stmcb_should_use_cards(struct object_s *); extern void stmcb_commit_soon(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -41,7 +41,7 @@ /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); object_t *stm_allocate_weakref(ssize_t size_rounded_up); -object_t *_stm_allocate_old(ssize_t size_rounded_up); +object_t *_stm_allocate_old(ssize_t size_rounded_up, long use_cards); /*void stm_write_card(); use _checked_stm_write_card() instead */ @@ -321,12 +321,21 @@ { struct myobj_s *myobj = (struct myobj_s*)obj; if (myobj->type_id < 421420) { - /* basic case: no references */ - return sizeof(struct myobj_s) + index; + abort(); // works, but we want to test otherwise + /* basic case: 
index=byteoffset */ + return index; } return sizeof(struct myobj_s) + index * sizeof(object_t*); } +long stmcb_should_use_cards(struct object_s *obj) +{ + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 421420) + return 0; /*no refs*/ + return 1; +} + void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) { STM_PUSH_MARKER(*tl, onum, ob); @@ -378,13 +387,13 @@ return lib.stm_can_move(o) def stm_allocate_old(size): - o = lib._stm_allocate_old(size) + o = lib._stm_allocate_old(size, False) tid = 42 + size lib._set_type_id(o, tid) return o def stm_allocate_old_refs(n): - o = lib._stm_allocate_old(HDR + n * WORD) + o = lib._stm_allocate_old(HDR + n * WORD, True) tid = 421420 + n lib._set_type_id(o, tid) return o diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py --- a/c7/test/test_card_marking.py +++ b/c7/test/test_card_marking.py @@ -17,14 +17,14 @@ self.switch(0) def test_simple(self): - o = stm_allocate_old(1024) + o = stm_allocate_old_refs(1024) self.start_transaction() stm_read(o) stm_write(o) self.commit_transaction() def test_simple2(self): - o = stm_allocate_old(1024) + o = stm_allocate_old_refs(1024) self.start_transaction() stm_write_card(o, 5) assert not stm_was_written(o) # don't remove GCFLAG_WRITE_BARRIER @@ -34,7 +34,7 @@ @py.test.mark.parametrize("k", range(3)) def test_overflow(self, k): self.start_transaction() - o = stm_allocate(1024) + o = stm_allocate_refs(1024) self.push_root(o) self._collect(k) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -380,7 +380,7 @@ thread_state.register_root(r) def op_allocate_ref(ex, global_state, thread_state): - num = str(global_state.rnd.randrange(1, 100)) + num = str(global_state.rnd.randrange(1, 1000)) r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) @@ -415,7 +415,7 @@ r = thread_state.get_random_root() trs 
= thread_state.transaction_state is_ref = global_state.has_ref_type(r) - try_cards = global_state.rnd.randrange(1, 100) > 5 + try_cards = global_state.rnd.randrange(1, 100) > 5# and False # # check for possible write-write conflict: was_written = False From noreply at buildbot.pypy.org Fri May 23 17:24:01 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 May 2014 17:24:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Some trailing whitespace Message-ID: <20140523152401.DB8DF1D2808@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71686:7f23c01d85f2 Date: 2014-05-23 10:22 -0500 http://bitbucket.org/pypy/pypy/changeset/7f23c01d85f2/ Log: Some trailing whitespace diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -13,7 +13,7 @@ """NOT_RPYTHON""" # because parent __init__ isn't if space.config.translating: del self.__class__.interpleveldefs['pypy_getudir'] - super(Module, self).__init__(space, w_name) + super(Module, self).__init__(space, w_name) self.recursionlimit = 100 self.w_default_encoder = None self.defaultencoding = "ascii" @@ -21,13 +21,13 @@ self.debug = True interpleveldefs = { - '__name__' : '(space.wrap("sys"))', - '__doc__' : '(space.wrap("PyPy sys module"))', + '__name__' : '(space.wrap("sys"))', + '__doc__' : '(space.wrap("PyPy sys module"))', - 'platform' : 'space.wrap(sys.platform)', + 'platform' : 'space.wrap(sys.platform)', 'maxint' : 'space.wrap(sys.maxint)', 'maxsize' : 'space.wrap(sys.maxint)', - 'byteorder' : 'space.wrap(sys.byteorder)', + 'byteorder' : 'space.wrap(sys.byteorder)', 'maxunicode' : 'space.wrap(vm.MAXUNICODE)', 'stdin' : 'state.getio(space).w_stdin', '__stdin__' : 'state.getio(space).w_stdin', @@ -36,35 +36,35 @@ 'stderr' : 'state.getio(space).w_stderr', '__stderr__' : 'state.getio(space).w_stderr', 'pypy_objspaceclass' : 'space.wrap(repr(space))', - #'prefix' : # added by pypy_initial_path() when it + 
#'prefix' : # added by pypy_initial_path() when it #'exec_prefix' : # succeeds, pointing to trunk or /usr 'path' : 'state.get(space).w_path', - 'modules' : 'state.get(space).w_modules', + 'modules' : 'state.get(space).w_modules', 'argv' : 'state.get(space).w_argv', 'py3kwarning' : 'space.w_False', - 'warnoptions' : 'state.get(space).w_warnoptions', + 'warnoptions' : 'state.get(space).w_warnoptions', 'builtin_module_names' : 'space.w_None', 'pypy_getudir' : 'state.pypy_getudir', # not translated 'pypy_find_stdlib' : 'initpath.pypy_find_stdlib', 'pypy_find_executable' : 'initpath.pypy_find_executable', 'pypy_resolvedirof' : 'initpath.pypy_resolvedirof', - '_getframe' : 'vm._getframe', - '_current_frames' : 'currentframes._current_frames', - 'setrecursionlimit' : 'vm.setrecursionlimit', - 'getrecursionlimit' : 'vm.getrecursionlimit', - 'setcheckinterval' : 'vm.setcheckinterval', - 'getcheckinterval' : 'vm.getcheckinterval', - 'exc_info' : 'vm.exc_info', - 'exc_clear' : 'vm.exc_clear', + '_getframe' : 'vm._getframe', + '_current_frames' : 'currentframes._current_frames', + 'setrecursionlimit' : 'vm.setrecursionlimit', + 'getrecursionlimit' : 'vm.getrecursionlimit', + 'setcheckinterval' : 'vm.setcheckinterval', + 'getcheckinterval' : 'vm.getcheckinterval', + 'exc_info' : 'vm.exc_info', + 'exc_clear' : 'vm.exc_clear', 'settrace' : 'vm.settrace', 'gettrace' : 'vm.gettrace', 'setprofile' : 'vm.setprofile', 'getprofile' : 'vm.getprofile', 'call_tracing' : 'vm.call_tracing', 'getsizeof' : 'vm.getsizeof', - - 'executable' : 'space.wrap("py.py")', + + 'executable' : 'space.wrap("py.py")', 'api_version' : 'version.get_api_version(space)', 'version_info' : 'version.get_version_info(space)', 'version' : 'version.get_version(space)', @@ -73,14 +73,14 @@ '_mercurial' : 'version.get_repo_info(space)', 'hexversion' : 'version.get_hexversion(space)', - 'displayhook' : 'hook.displayhook', - '__displayhook__' : 'hook.__displayhook__', + 'displayhook' : 'hook.displayhook', + 
'__displayhook__' : 'hook.__displayhook__', 'meta_path' : 'space.wrap([])', 'path_hooks' : 'space.wrap([])', 'path_importer_cache' : 'space.wrap({})', 'dont_write_bytecode' : 'space.w_False', - - 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', + + 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', 'setdefaultencoding' : 'interp_encoding.setdefaultencoding', 'getfilesystemencoding' : 'interp_encoding.getfilesystemencoding', @@ -119,21 +119,21 @@ w_modules = self.get('modules') try: return space.getitem(w_modules, space.wrap(name)) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - return None + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + return None - def setmodule(self, w_module): + def setmodule(self, w_module): space = self.space w_name = self.space.getattr(w_module, space.wrap('__name__')) w_modules = self.get('modules') self.space.setitem(w_modules, w_name, w_module) def getdictvalue(self, space, attr): - """ specialize access to dynamic exc_* attributes. """ - value = MixedModule.getdictvalue(self, space, attr) - if value is not None: + """ specialize access to dynamic exc_* attributes. 
""" + value = MixedModule.getdictvalue(self, space, attr) + if value is not None: return value if attr == 'exc_type': operror = space.getexecutioncontext().sys_exc_info() @@ -153,7 +153,7 @@ return space.w_None else: return space.wrap(operror.get_traceback()) - return None + return None def get_w_default_encoder(self): if self.w_default_encoder is not None: From noreply at buildbot.pypy.org Fri May 23 17:24:03 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 23 May 2014 17:24:03 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140523152403.3ECF41D2808@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71687:ea5040e38901 Date: 2014-05-23 10:23 -0500 http://bitbucket.org/pypy/pypy/changeset/ea5040e38901/ Log: merged upstream diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -5,7 +5,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty @@ -61,15 +61,6 @@ def getlength(self): return self.buf.getlength() - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = SubBuffer(self.buf, start, size) - return W_MemoryView(buf) - def descr_tobytes(self, space): return space.wrap(self.as_str()) @@ -81,25 +72,25 @@ return space.newlist(result) def descr_getitem(self, space, w_index): - start, stop, step = space.decode_index(w_index, self.getlength()) + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") if step == 0: # 
index only return space.wrap(self.buf.getitem(start)) - res = self.getslice(start, stop) - return space.wrap(res) + else: + buf = SubBuffer(self.buf, start, size) + return W_MemoryView(buf) def descr_setitem(self, space, w_index, w_obj): if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "cannot modify read-only memory")) - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + raise oefmt(space.w_TypeError, "cannot modify read-only memory") + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) if value.getlength() != size: - raise OperationError(space.w_ValueError, space.wrap( - "cannot modify size of memoryview object")) + raise oefmt(space.w_ValueError, + "cannot modify size of memoryview object") if step == 0: # index only self.buf.setitem(start, value.getitem(0)) elif step == 1: From noreply at buildbot.pypy.org Sat May 24 00:39:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 24 May 2014 00:39:39 +0200 (CEST) Subject: [pypy-commit] pypy default: initial support for numpy.frombuffer Message-ID: <20140523223939.C44671D2392@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71688:d7b779ef7782 Date: 2014-05-23 17:25 -0400 http://bitbucket.org/pypy/pypy/changeset/d7b779ef7782/ Log: initial support for numpy.frombuffer diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -15,6 +15,7 @@ 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', + 'frombuffer': 'ctors.frombuffer', 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', diff --git a/pypy/module/micronumpy/ctors.py 
b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop @@ -191,3 +192,54 @@ return _fromstring_bin(space, s, count, length, dtype) else: return _fromstring_text(space, s, count, sep, length, dtype) + + +def _getbuffer(space, w_buffer): + try: + return space.writebuf_w(w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + return space.readbuf_w(w_buffer) + + + at unwrap_spec(count=int, offset=int) +def frombuffer(space, w_buffer, w_dtype=None, count=-1, offset=0): + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero in type") + + try: + buf = _getbuffer(space, w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + buf = _getbuffer(space, space.getattr(w_buffer, space.wrap('__buffer__'))) + + ts = buf.getlength() + if offset < 0 or offset > ts: + raise oefmt(space.w_ValueError, + "offset must be non-negative and no greater than " + "buffer length (%d)", ts) + + s = ts - offset + if offset: + buf = SubBuffer(buf, offset, s) + + n = count + itemsize = dtype.elsize + assert itemsize > 0 + if n < 0: + if s % itemsize != 0: + raise oefmt(space.w_ValueError, + "buffer size must be a multiple of element size") + n = s / itemsize + else: + if s < n * itemsize: + raise oefmt(space.w_ValueError, + "buffer is smaller than requested size") + + a = W_NDimArray.from_shape(space, [n], dtype=dtype) + loop.fromstring_loop(space, a, dtype, itemsize, buf.as_str()) + return space.wrap(a) 
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3142,6 +3142,23 @@ cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) + def test_frombuffer(self): + import numpy as np + exc = raises(AttributeError, np.frombuffer, None) + assert str(exc.value) == "'NoneType' object has no attribute '__buffer__'" + exc = raises(AttributeError, np.frombuffer, memoryview(self.data)) + assert str(exc.value) == "'memoryview' object has no attribute '__buffer__'" + exc = raises(ValueError, np.frombuffer, self.data, 'S0') + assert str(exc.value) == "itemsize cannot be zero in type" + exc = raises(ValueError, np.frombuffer, self.data, offset=-1) + assert str(exc.value) == "offset must be non-negative and no greater than buffer length (32)" + exc = raises(ValueError, np.frombuffer, self.data, count=100) + assert str(exc.value) == "buffer is smaller than requested size" + for data in [self.data, buffer(self.data)]: + a = np.frombuffer(data) + for i in range(4): + assert a[i] == i + 1 + def test_fromstring(self): import sys from numpypy import fromstring, dtype From noreply at buildbot.pypy.org Sat May 24 00:39:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 24 May 2014 00:39:41 +0200 (CEST) Subject: [pypy-commit] pypy default: have frombuffer actually use buffer's storage Message-ID: <20140523223941.162CE1D2392@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71689:e115979b6395 Date: 2014-05-23 18:27 -0400 http://bitbucket.org/pypy/pypy/changeset/e115979b6395/ Log: have frombuffer actually use buffer's storage diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -215,7 +215,8 @@ except OperationError as e: if not e.match(space, 
space.w_TypeError): raise - buf = _getbuffer(space, space.getattr(w_buffer, space.wrap('__buffer__'))) + w_buffer = space.getattr(w_buffer, space.wrap('__buffer__')) + buf = _getbuffer(space, w_buffer) ts = buf.getlength() if offset < 0 or offset > ts: @@ -240,6 +241,13 @@ raise oefmt(space.w_ValueError, "buffer is smaller than requested size") - a = W_NDimArray.from_shape(space, [n], dtype=dtype) - loop.fromstring_loop(space, a, dtype, itemsize, buf.as_str()) - return space.wrap(a) + try: + storage = buf.get_raw_address() + except ValueError: + a = W_NDimArray.from_shape(space, [n], dtype=dtype) + loop.fromstring_loop(space, a, dtype, itemsize, buf.as_str()) + return a + else: + writable = not buf.readonly + return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype, + w_base=w_buffer, writable=writable) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3132,6 +3132,8 @@ class AppTestSupport(BaseNumpyAppTest): + spaceconfig = {'usemodules': ['micronumpy', 'array']} + def setup_class(cls): import struct BaseNumpyAppTest.setup_class.im_func(cls) @@ -3159,6 +3161,19 @@ for i in range(4): assert a[i] == i + 1 + import array + data = array.array('c', 'testing') + a = np.frombuffer(data, 'c') + assert a.base is data + a[2] = 'Z' + assert data.tostring() == 'teZting' + + data = buffer(data) + a = np.frombuffer(data, 'c') + assert a.base is data + exc = raises(ValueError, "a[2] = 'Z'") + assert str(exc.value) == "assignment destination is read-only" + def test_fromstring(self): import sys from numpypy import fromstring, dtype From noreply at buildbot.pypy.org Sat May 24 00:39:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 24 May 2014 00:39:42 +0200 (CEST) Subject: [pypy-commit] pypy default: test frombuffer using __buffer__ attribute Message-ID: 
<20140523223942.487491D2392@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71690:c3bdd9af8ac0 Date: 2014-05-23 18:32 -0400 http://bitbucket.org/pypy/pypy/changeset/c3bdd9af8ac0/ Log: test frombuffer using __buffer__ attribute diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3174,6 +3174,14 @@ exc = raises(ValueError, "a[2] = 'Z'") assert str(exc.value) == "assignment destination is read-only" + class A(object): + __buffer__ = 'abc' + + data = A() + a = np.frombuffer(data, 'c') + #assert a.base is data.__buffer__ + assert a.tostring() == 'abc' + def test_fromstring(self): import sys from numpypy import fromstring, dtype From noreply at buildbot.pypy.org Sat May 24 08:34:53 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 24 May 2014 08:34:53 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Make format and itemsize attributes of W_MemoryView instead of Buffer. They are exported by returning a tuple from buffer_w(). Message-ID: <20140524063453.77D6A1D2B2F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71691:8aef32a90ec4 Date: 2014-05-24 08:32 +0200 http://bitbucket.org/pypy/pypy/changeset/8aef32a90ec4/ Log: Make format and itemsize attributes of W_MemoryView instead of Buffer. They are exported by returning a tuple from buffer_w(). 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1350,7 +1350,7 @@ def readbuf_w(self, w_obj): # Old buffer interface, returns a readonly buffer (PyObject_AsReadBuffer) try: - return w_obj.buffer_w(self, self.BUF_SIMPLE) + return w_obj.buffer_w(self, self.BUF_SIMPLE)[0] except TypeError: raise oefmt(self.w_TypeError, "expected an object with a buffer interface") @@ -1358,7 +1358,7 @@ def writebuf_w(self, w_obj): # Old buffer interface, returns a writeable buffer (PyObject_AsWriteBuffer) try: - return w_obj.buffer_w(self, self.BUF_WRITABLE) + return w_obj.buffer_w(self, self.BUF_WRITABLE)[0] except TypeError: raise oefmt(self.w_TypeError, "expected an object with a writable buffer interface") @@ -1366,7 +1366,7 @@ def charbuf_w(self, w_obj): # Old buffer interface, returns a character buffer (PyObject_AsCharBuffer) try: - buf = w_obj.buffer_w(self, self.BUF_SIMPLE) + buf = w_obj.buffer_w(self, self.BUF_SIMPLE)[0] except TypeError: raise oefmt(self.w_TypeError, "expected an object with a buffer interface") @@ -1392,7 +1392,7 @@ if self.isinstance_w(w_obj, self.w_unicode): return StringBuffer(w_obj.identifier_w(self)) try: - return w_obj.buffer_w(self, self.BUF_SIMPLE) + return w_obj.buffer_w(self, self.BUF_SIMPLE)[0] except TypeError: self._getarg_error("bytes or buffer", w_obj) elif code == 's#': @@ -1401,13 +1401,13 @@ if self.isinstance_w(w_obj, self.w_unicode): return w_obj.identifier_w(self) try: - return w_obj.buffer_w(self, self.BUF_SIMPLE).as_str() + return w_obj.buffer_w(self, self.BUF_SIMPLE)[0].as_str() except TypeError: self._getarg_error("bytes or read-only buffer", w_obj) elif code == 'w*': try: try: - return w_obj.buffer_w(self, self.BUF_WRITABLE) + return w_obj.buffer_w(self, self.BUF_WRITABLE)[0] except OperationError: pass except TypeError: @@ -1415,7 +1415,7 @@ self._getarg_error("read-write buffer", w_obj) elif code == 'y*': try: - 
return w_obj.buffer_w(self, self.BUF_SIMPLE) + return w_obj.buffer_w(self, self.BUF_SIMPLE)[0] except TypeError: self._getarg_error("bytes or buffer", w_obj) else: @@ -1437,7 +1437,7 @@ if not e.match(self, self.w_TypeError): raise try: - buf = w_obj.buffer_w(self, 0) + buf = w_obj.buffer_w(self, 0)[0] except TypeError: raise oefmt(self.w_TypeError, "'%T' does not support the buffer interface", w_obj) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1559,7 +1559,7 @@ source = space.bytes_w(w_source) else: try: - buf = space.buffer_w(w_source, space.BUF_SIMPLE) + buf = space.buffer_w(w_source, space.BUF_SIMPLE)[0] except OperationError as e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -12,8 +12,6 @@ def __init__(self, len): self.data = ['\x00'] * len self.readonly = False - self.format = 'B' - self.itemsize = 1 def getlength(self): return len(self.data) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -17,8 +17,6 @@ self.raw_cdata = raw_cdata self.size = size self.readonly = False - self.format = 'B' - self.itemsize = 1 def getlength(self): return self.size @@ -48,7 +46,7 @@ self.keepalive = keepalive def buffer_w(self, space, flags): - return self.buffer + return self.buffer, 'B', 1 def descr_len(self, space): return space.wrap(self.buffer.getlength()) @@ -66,7 +64,7 @@ self.buffer.getlength()) if step not in (0, 1): raise oefmt(space.w_NotImplementedError, "") - value = space.buffer_w(w_newstring, space.BUF_CONTIG_RO) + value = space.buffer_w(w_newstring, space.BUF_CONTIG_RO)[0] if value.getlength() != size: raise oefmt(space.w_ValueError, "cannot modify size of 
memoryview object") diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -114,8 +114,6 @@ self.start = start self.length = length self.readonly = False - self.format = 'B' - self.itemsize = 1 def getlength(self): return self.length diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -17,8 +17,6 @@ def __init__(self, w_bytesio): self.w_bytesio = w_bytesio self.readonly = False - self.format = 'B' - self.itemsize = 1 def getlength(self): return int(self.w_bytesio.getsize()) @@ -96,7 +94,7 @@ def write_w(self, space, w_data): self._check_closed(space) - buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str() + buf = space.buffer_w(w_data, space.BUF_CONTIG_RO)[0].as_str() length = len(buf) if length <= 0: return space.wrap(0) diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py --- a/pypy/module/_rawffi/buffer.py +++ b/pypy/module/_rawffi/buffer.py @@ -9,8 +9,6 @@ def __init__(self, datainstance): self.datainstance = datainstance self.readonly = False - self.format = 'B' - self.itemsize = 1 def getlength(self): return self.datainstance.getrawsize() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -361,7 +361,7 @@ self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) def buffer_w(self, space, flags): - return RawFFIBuffer(self) + return RawFFIBuffer(self), 'B', 1 def getrawsize(self): raise NotImplementedError("abstract base class") diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -139,7 +139,7 @@ self.allocated = 0 def buffer_w(self, space, flags): - return ArrayBuffer(self, 
False) + return ArrayBuffer(self, False), self.typecode, self.itemsize_ def descr_append(self, space, w_x): """ append(x) @@ -632,12 +632,10 @@ def __init__(self, array, readonly): self.array = array - self.format = array.typecode - self.itemsize = array.itemsize_ self.readonly = readonly def getlength(self): - return self.array.len * self.itemsize + return self.array.len * self.array.itemsize_ def getitem(self, index): array = self.array diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -20,8 +20,6 @@ self.c_buf = c_buf self.c_len = c_len self.w_obj = w_obj - self.format = 'B' - self.itemsize = 1 def destructor(self): assert isinstance(self, CBufferMixin) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -253,7 +253,5 @@ the buffer protocol.""" if space.is_w(space.type(w_obj), space.w_bytes): return w_obj - buffer = space.buffer_w(w_obj, space.BUF_FULL_RO) + buffer = space.buffer_w(w_obj, space.BUF_FULL_RO)[0] return space.wrapbytes(buffer.as_str()) - - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -237,8 +237,6 @@ self.size = size self.w_obj = w_obj # kept alive self.readonly = True - self.format = 'B' - self.itemsize = 1 def getlength(self): return self.size diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -477,9 +477,6 @@ def __init__(self, impl, readonly): self.impl = impl self.readonly = readonly - #XXX - self.format = 'B' - self.itemsize = 1 def getitem(self, item): return raw_storage_getitem(lltype.Char, self.impl.storage, item) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- 
a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -619,7 +619,8 @@ "ctypes not implemented yet")) def buffer_w(self, space, flags): - return self.implementation.get_buffer(space, True) + # XXX format isn't always 'B' probably + return self.implementation.get_buffer(space, True), 'B', 1 def descr_get_data(self, space): return space.newbuffer(self.implementation.get_buffer(space, False)) diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -19,8 +19,9 @@ def buffer_w(self, space, flags): self.check_valid() - return MMapBuffer(self.space, self.mmap, - bool(flags & space.BUF_WRITABLE)) + return (MMapBuffer(self.space, self.mmap, + bool(flags & space.BUF_WRITABLE)), + 'B', 1) def close(self): self.mmap.close() @@ -311,8 +312,6 @@ self.space = space self.mmap = mmap self.readonly = readonly - self.format = 'B' - self.itemsize = 1 def getlength(self): return self.mmap.size diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -96,7 +96,7 @@ @unwrap_spec(format=str, offset=int) def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) - buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) + buf = space.buffer_w(w_buffer, space.BUF_SIMPLE)[0] if offset < 0: offset += buf.getlength() if offset < 0 or (buf.getlength() - offset) < size: diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -41,7 +41,7 @@ is_root(w_subtype) def buffer_w(self, space, flags): - return StringBuffer("foobar") + return StringBuffer("foobar"), 'B', 1 def str_w(self, space): return NonConstant("foobar") diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py 
+++ b/pypy/objspace/std/bytearrayobject.py @@ -29,7 +29,7 @@ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def buffer_w(self, space, flags): - return BytearrayBuffer(self.data, False) + return BytearrayBuffer(self.data, False), 'B', 1 def _new(self, value): return W_BytearrayObject(_make_data(value)) @@ -55,7 +55,7 @@ @staticmethod def _op_val(space, w_other): - return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() + return space.buffer_w(w_other, space.BUF_SIMPLE)[0].as_str() def _chr(self, char): assert len(char) == 1 @@ -1062,8 +1062,6 @@ def __init__(self, data, readonly): self.data = data self.readonly = readonly - self.format = 'B' - self.itemsize = 1 def getlength(self): return len(self.data) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -400,7 +400,7 @@ def buffer_w(self, space, flags): space.check_buf_flags(flags, True) - return StringBuffer(self._value) + return StringBuffer(self._value), 'B', 1 def listview_int(self): return _create_list_from_bytes(self._value) @@ -433,7 +433,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return space.buffer_w(w_other, space.BUF_SIMPLE).as_str() + return space.buffer_w(w_other, space.BUF_SIMPLE)[0].as_str() def _chr(self, char): assert len(char) == 1 @@ -733,7 +733,7 @@ # String-like argument try: - buf = space.buffer_w(w_source, space.BUF_FULL_RO) + buf = space.buffer_w(w_source, space.BUF_FULL_RO)[0] except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -15,18 +15,20 @@ an interp-level buffer. 
""" - def __init__(self, buf): + def __init__(self, buf, format='B', itemsize=1): assert isinstance(buf, Buffer) self.buf = buf + self.format = format + self.itemsize = itemsize def buffer_w(self, space, flags): self._check_released(space) space.check_buf_flags(flags, self.buf.readonly) - return self.buf + return self.buf, self.format, self.itemsize @staticmethod def descr_new_memoryview(space, w_subtype, w_object): - return W_MemoryView(space.buffer_w(w_object, space.BUF_FULL_RO)) + return W_MemoryView(*space.buffer_w(w_object, space.BUF_FULL_RO)) def _make_descr__cmp(name): def descr__cmp(self, space, w_other): @@ -39,7 +41,7 @@ return space.wrap(getattr(operator, name)(str1, str2)) try: - buf = space.buffer_w(w_other, space.BUF_CONTIG_RO) + buf = space.buffer_w(w_other, space.BUF_CONTIG_RO)[0] except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -60,7 +62,7 @@ return buf.getslice(0, n_bytes, 1, n_bytes) def getlength(self): - return self.buf.getlength() // self.buf.itemsize + return self.buf.getlength() // self.itemsize def descr_tobytes(self, space): self._check_released(space) @@ -69,7 +71,7 @@ def descr_tolist(self, space): self._check_released(space) buf = self.buf - if buf.format != 'B': + if self.format != 'B': raise oefmt(space.w_NotImplementedError, "tolist() only supports byte views") result = [] @@ -83,13 +85,13 @@ if step not in (0, 1): raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only - a = start * self.buf.itemsize - b = a + self.buf.itemsize + a = start * self.itemsize + b = a + self.itemsize return space.wrapbytes( ''.join([self.buf.getitem(i) for i in range(a, b)])) else: - buf = SubBuffer(self.buf, start * self.buf.itemsize, - size * self.buf.itemsize) + buf = SubBuffer(self.buf, start * self.itemsize, + size * self.itemsize) return W_MemoryView(buf) def descr_setitem(self, space, w_index, w_obj): @@ -99,11 +101,11 @@ start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step 
not in (0, 1): raise oefmt(space.w_NotImplementedError, "") - value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size * self.buf.itemsize: + value = space.buffer_w(w_obj, space.BUF_CONTIG_RO)[0] + if value.getlength() != size * self.itemsize: raise oefmt(space.w_ValueError, "cannot modify size of memoryview object") - self.buf.setslice(start * self.buf.itemsize, value.as_str()) + self.buf.setslice(start * self.itemsize, value.as_str()) def descr_len(self, space): self._check_released(space) @@ -111,11 +113,11 @@ def w_get_format(self, space): self._check_released(space) - return space.wrap(self.buf.format) + return space.wrap(self.format) def w_get_itemsize(self, space): self._check_released(space) - return space.wrap(self.buf.itemsize) + return space.wrap(self.itemsize) def w_get_ndim(self, space): self._check_released(space) @@ -131,7 +133,7 @@ def w_get_strides(self, space): self._check_released(space) - return space.newtuple([space.wrap(self.buf.itemsize)]) + return space.newtuple([space.wrap(self.itemsize)]) def w_get_suboffsets(self, space): self._check_released(space) diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -37,7 +37,7 @@ return self.force() def buffer_w(self, space, flags): - return StringBuffer(self.force()) + return StringBuffer(self.force()), 'B', 1 def descr_len(self, space): return space.wrap(self.length) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -6,7 +6,7 @@ class Buffer(object): """Abstract base class for buffers.""" - __slots__ = ['readonly', 'format', 'itemsize'] + __slots__ = ['readonly'] _immutable_ = True def getlength(self): @@ -45,8 +45,6 @@ def __init__(self, value): self.value = value self.readonly = True - self.format = 'B' - self.itemsize = 1 def getlength(self): return len(self.value) @@ -72,8 +70,6 @@ def 
__init__(self, buffer, offset, size): self.readonly = buffer.readonly - self.format = buffer.format - self.itemsize = buffer.itemsize self.buffer = buffer self.offset = offset self.size = size From noreply at buildbot.pypy.org Sat May 24 08:34:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 24 May 2014 08:34:54 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Remove this horrible annotator workaround. Message-ID: <20140524063454.B35261D2B2F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71692:2b9b6d714df8 Date: 2014-05-24 08:33 +0200 http://bitbucket.org/pypy/pypy/changeset/2b9b6d714df8/ Log: Remove this horrible annotator workaround. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -62,7 +62,7 @@ def descr_itemsize(space, self): - return space.wrap(self.itemsize_) + return space.wrap(self.itemsize) def descr_typecode(space, self): @@ -139,7 +139,7 @@ self.allocated = 0 def buffer_w(self, space, flags): - return ArrayBuffer(self, False), self.typecode, self.itemsize_ + return ArrayBuffer(self, False), self.typecode, self.itemsize def descr_append(self, space, w_x): """ append(x) @@ -243,7 +243,7 @@ bytes representation. """ cbuf = self._charbuf_start() - s = rffi.charpsize2str(cbuf, self.len * self.itemsize_) + s = rffi.charpsize2str(cbuf, self.len * self.itemsize) self._charbuf_stop() return self.space.wrapbytes(s) @@ -269,16 +269,16 @@ machine values, as if it had been read from a file using the fromfile() method). 
""" - if len(s) % self.itemsize_ != 0: + if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) oldlen = self.len - new = len(s) / self.itemsize_ + new = len(s) / self.itemsize if not new: return self.setlen(oldlen + new) cbuf = self._charbuf_start() - copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize_), + copy_string_to_raw(llstr(s), rffi.ptradd(cbuf, oldlen * self.itemsize), 0, len(s)) self._charbuf_stop() @@ -290,14 +290,14 @@ array. Also called as read. """ try: - size = ovfcheck(self.itemsize_ * n) + size = ovfcheck(self.itemsize * n) except OverflowError: raise MemoryError w_item = space.call_method(w_f, 'read', space.wrap(size)) item = space.bytes_w(w_item) if len(item) < size: - n = len(item) % self.itemsize_ - elems = max(0, len(item) - (len(item) % self.itemsize_)) + n = len(item) % self.itemsize + elems = max(0, len(item) - (len(item) % self.itemsize)) if n != 0: item = item[0:elems] self.descr_frombytes(space, item) @@ -407,7 +407,7 @@ rffi.c_memcpy( rffi.cast(rffi.VOIDP, w_a._buffer_as_unsigned()), rffi.cast(rffi.VOIDP, self._buffer_as_unsigned()), - self.len * self.itemsize_ + self.len * self.itemsize ) return w_a @@ -417,18 +417,18 @@ Byteswap all items of the array. If the items in the array are not 1, 2, 4, or 8 bytes in size, RuntimeError is raised. 
""" - if self.itemsize_ not in [1, 2, 4, 8]: + if self.itemsize not in [1, 2, 4, 8]: msg = "byteswap not supported for this array" raise OperationError(space.w_RuntimeError, space.wrap(msg)) if self.len == 0: return bytes = self._charbuf_start() - tmp = [bytes[0]] * self.itemsize_ - for start in range(0, self.len * self.itemsize_, self.itemsize_): - stop = start + self.itemsize_ - 1 - for i in range(self.itemsize_): + tmp = [bytes[0]] * self.itemsize + for start in range(0, self.len * self.itemsize, self.itemsize): + stop = start + self.itemsize - 1 + for i in range(self.itemsize): tmp[i] = bytes[start + i] - for i in range(self.itemsize_): + for i in range(self.itemsize): bytes[stop - i] = tmp[i] self._charbuf_stop() @@ -635,7 +635,7 @@ self.readonly = readonly def getlength(self): - return self.array.len * self.array.itemsize_ + return self.array.len * self.array.itemsize def getitem(self, index): array = self.array @@ -669,7 +669,7 @@ W_ArrayBase = globals()['W_ArrayBase'] class W_Array(W_ArrayBase): - itemsize_ = mytype.bytes + itemsize = mytype.bytes typecode = mytype.typecode _attrs_ = ('space', 'len', 'allocated', '_lifeline_', 'buffer') From noreply at buildbot.pypy.org Sat May 24 11:12:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 May 2014 11:12:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix (struys and eevee on http://stackoverflow.com/questions/23816549) Message-ID: <20140524091212.442771C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71693:69ff0c3801ca Date: 2014-05-24 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/69ff0c3801ca/ Log: Test and fix (struys and eevee on http://stackoverflow.com/questions/23816549) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -748,11 +748,11 @@ self.lockcounter = 0 def lock_held_by_someone_else(self): - return self.lockowner is not None and not 
self.lock_held() + me = self.space.getexecutioncontext() # used as thread ident + return self.lockowner is not None and self.lockowner is not me - def lock_held(self): - me = self.space.getexecutioncontext() # used as thread ident - return self.lockowner is me + def lock_held_by_anyone(self): + return self.lockowner is not None def acquire_lock(self): # this function runs with the GIL acquired so there is no race diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -165,7 +165,7 @@ def lock_held(space): if space.config.objspace.usemodules.thread: - return space.wrap(importing.getimportlock(space).lock_held()) + return space.wrap(importing.getimportlock(space).lock_held_by_anyone()) else: return space.w_False diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -62,6 +62,28 @@ self.waitfor(lambda: done) assert done + def test_lock_held_by_another_thread(self): + import thread, imp + lock_held = thread.allocate_lock() + test_complete = thread.allocate_lock() + lock_released = thread.allocate_lock() + def other_thread(): + imp.acquire_lock() # 3 + assert imp.lock_held() + lock_held.release() # 4 + test_complete.acquire() # 7 + imp.release_lock() # 8 + lock_released.release() # 9 + lock_held.acquire() + test_complete.acquire() + lock_released.acquire() + # + thread.start_new_thread(other_thread, ()) # 1 + lock_held.acquire() # 2 + assert imp.lock_held() # 5 + test_complete.release() # 6 + lock_released.acquire() # 10 + class TestImportLock: def test_lock(self, space, monkeypatch): from pypy.module.imp.importing import getimportlock, importhook From noreply at buildbot.pypy.org Sat May 24 12:09:06 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 24 May 2014 12:09:06 +0200 (CEST) Subject: [pypy-commit] pypy 
py3k-memoryview: Fix translation. Message-ID: <20140524100906.797A01C01E5@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r71694:0dc075198a2c Date: 2014-05-24 12:08 +0200 http://bitbucket.org/pypy/pypy/changeset/0dc075198a2c/ Log: Fix translation. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -62,10 +62,12 @@ def descr_itemsize(space, self): + assert isinstance(self, W_ArrayBase) return space.wrap(self.itemsize) def descr_typecode(space, self): + assert isinstance(self, W_ArrayBase) return space.wrap(self.typecode) arr_eq_driver = jit.JitDriver(name='array_eq_driver', greens=['comp_func'], From noreply at buildbot.pypy.org Sat May 24 12:10:40 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 24 May 2014 12:10:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: STM slides Message-ID: <20140524101040.9DDA81C01E5@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5263:5acdd3db487c Date: 2014-05-24 12:10 +0200 http://bitbucket.org/pypy/extradoc/changeset/5acdd3db487c/ Log: STM slides diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -183,6 +183,8 @@ - ~80% of numpy implemented + * 2336 passing tests out of 3265 + * http://buildbot.pypy.org/numpy-status/latest.html - just try it @@ -237,13 +239,114 @@ - 3x faster than CPython -STM ---- +The future: STM +---------------- -TODO +- Software Transactional Memory -Q&A ---- +- Strategy to solve race conditions -Any question? 
+- "Finger crossed", rollback in case of conflicts +- On-going research project + + * by Armin Rigo and Remi Meier + +STM semantics +------------- + +- N threads + +- Each thread split into atomic blocks + +- Sequential execution in some arbitrary order + +- In practice: + +- Parallel execution, conflicts solved by STM + + +Unit of execution (1) +--------------------- + +- Atomic blocks == 1 Python bytecode + +- Threads are executed in arbitrary order, but bytecodes are atomic + +- ==> Same semantics as GIL + +- "and this will solve the GIL problem" (A. Rigo, EuroPython 2011 lighting talk) + +Unit of execution (2) +---------------------- + +- Larger atomic blocks + +- ``with atomic:`` + +- Much easier to use than explicit locks + +- Can be hidden by libraries to provide even higher level paradigms + + * e.g.: Twisted apps made parallel out of the box + +Race conditions +--------------- + +- They don't magically disappear + +- With explicit locks + + * ==> BOOM + + * you fix bugs by preventing race conditions + +- With atomic blocks + + * ==> Rollaback + + * Performance penalty + + * You optimize by preventing race conditions + +- Fast&broken vs. Slower&correct + + +Implementation +--------------- + +- Conflicts detection, commit and rollaback is costly + +- Original goal (2011): 2x-5x slower than PyPy without STM + + * But parallelizable! + +|pause| + +- Current goal (2014): 25% slower than PyPy without STM + +- Yes, that's 800x faster than original goal + +- mmap black magic + +Current status +--------------- + +- Preliminary versions of pypy-jit-stm available + +- The JIT overhead is still a bit too high + +- Lots of polishing needed + + +Thank you +--------- + +- http://pypy.org + +- http://morepypy.blogspot.com/ + +- http://antocuni.eu + +- Any question? 
+ From noreply at buildbot.pypy.org Sat May 24 12:53:08 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 24 May 2014 12:53:08 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: changes contacts slide Message-ID: <20140524105308.24FD91C003C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5264:193f2f60e379 Date: 2014-05-24 12:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/193f2f60e379/ Log: changes contacts slide diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -339,14 +339,20 @@ - Lots of polishing needed -Thank you ---------- +Contacts, Q&A +-------------- - http://pypy.org - http://morepypy.blogspot.com/ -- http://antocuni.eu +- twitter: @antocuni + +- Available for consultancy & training: + + * http://antocuni.eu + + * info at antocuni.eu - Any question? From noreply at buildbot.pypy.org Sat May 24 16:44:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 May 2014 16:44:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: The number "800x" is bogus, it's "8x-20x", which I'll round to 10x. Message-ID: <20140524144426.5F0BE1C0109@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5265:a8bd5d229c8f Date: 2014-05-24 16:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/a8bd5d229c8f/ Log: The number "800x" is bogus, it's "8x-20x", which I'll round to 10x. 
diff --git a/talk/pycon-italy-2014/talk.rst b/talk/pycon-italy-2014/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/pycon-italy-2014/talk.rst @@ -325,7 +325,7 @@ - Current goal (2014): 25% slower than PyPy without STM -- Yes, that's 800x faster than original goal +- Yes, that's 10x less overhead than original goal - mmap black magic From noreply at buildbot.pypy.org Sat May 24 18:57:29 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 24 May 2014 18:57:29 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: kill HighLevelOp.forced_opname Message-ID: <20140524165729.328E51C0109@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71695:db7249733b48 Date: 2014-05-24 05:46 +0100 http://bitbucket.org/pypy/pypy/changeset/db7249733b48/ Log: kill HighLevelOp.forced_opname diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -352,11 +352,6 @@ v_func = r_class.getclsfield(v_cls, self.methodname, hop.llops) hop2 = self.add_instance_arg_to_hop(hop, call_args) - opname = 'simple_call' - if call_args: - opname = 'call_args' - hop2.forced_opname = opname - hop2.v_s_insertfirstarg(v_func, s_func) # insert 'function' if type(hop2.args_r[0]) is SmallFunctionSetPBCRepr and type(r_func) is FunctionsPBCRepr: diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -10,7 +10,9 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.tool.pairtype import pairtype +from rpython.rtyper.rtyper import HighLevelOp from rpython.rtyper.rmodel import Repr from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.error import TyperError @@ -34,12 +36,10 
@@ def redispatch_getfield(self, hop, index): rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2 = HighLevelOp(rtyper, spaceop, hop.exceptionlinks[:], hop.llops[:]) + hop2.setup() return hop2.dispatch() def rtype_getattr(self, hop): @@ -86,11 +86,10 @@ def redispatch_getfield(self, hop, index): rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2 = HighLevelOp(rtyper, spaceop, hop.exceptionlinks[:], hop.llops[:]) + hop2.setup() return hop2.dispatch() def rtype_getattr(self, hop): diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -71,5 +71,5 @@ s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj hop2.s_result, hop2.r_result = s_new, r_new hop2.v_s_insertfirstarg(c_meth, s_meth) - hop2.forced_opname = 'simple_call' + hop2.spaceop.opname = 'simple_call' return hop2.dispatch() diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -650,8 +650,6 @@ class HighLevelOp(object): - forced_opname = None - def __init__(self, rtyper, spaceop, exceptionlinks, llops): self.rtyper = rtyper self.spaceop = spaceop @@ -679,12 +677,11 @@ if type(value) is list: # grunt value = value[:] setattr(result, key, value) - result.forced_opname 
= self.forced_opname return result def dispatch(self): rtyper = self.rtyper - opname = self.forced_opname or self.spaceop.opname + opname = self.spaceop.opname translate_meth = getattr(rtyper, 'translate_op_' + opname, rtyper.default_translate_operation) return translate_meth(self) From noreply at buildbot.pypy.org Sat May 24 18:57:30 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 24 May 2014 18:57:30 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: Don't fiddle with spaceop.opname in rtypedelegate() Message-ID: <20140524165730.641E91C0109@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71696:62eb2cc34cbc Date: 2014-05-24 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/62eb2cc34cbc/ Log: Don't fiddle with spaceop.opname in rtypedelegate() diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -1,4 +1,5 @@ from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.rtyper.error import TyperError from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -71,5 +72,7 @@ s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj hop2.s_result, hop2.r_result = s_new, r_new hop2.v_s_insertfirstarg(c_meth, s_meth) - hop2.spaceop.opname = 'simple_call' + spaceop = op.simple_call(*hop2.args_v) + spaceop.result = hop2.spaceop.result + hop2.spaceop = spaceop return hop2.dispatch() From noreply at buildbot.pypy.org Sat May 24 19:21:57 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 19:21:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove a pointless distinction between the base class and the subclass Message-ID: <20140524172157.B1DC61C003C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71697:d54efd025688 Date: 2014-05-24 12:21 -0500 
http://bitbucket.org/pypy/pypy/changeset/d54efd025688/ Log: Remove a pointless distinction between the base class and the subclass diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -6,24 +6,27 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.error import TyperError -class TypeSystem(object): +class LowLevelTypeSystem(object): __metaclass__ = extendabletype + name = "lltypesystem" + callable_trait = (lltype.FuncType, lltype.functionptr) + def derefType(self, T): - raise NotImplementedError() + assert isinstance(T, lltype.Ptr) + return T.TO def deref(self, obj): - """Dereference `obj' to concrete object.""" - raise NotImplementedError() + assert isinstance(lltype.typeOf(obj), lltype.Ptr) + return obj._obj def check_null(self, repr, hop): - """Emit operations to check that `hop's argument is not a null object. -""" - raise NotImplementedError() + # None is a nullptr, which is false; everything else is true. + vlist = hop.inputargs(repr) + return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) def null_callable(self, T): - """null callable object of type T""" - raise NotImplementedError() + return lltype.nullptr(T.TO) def getcallabletype(self, ARGS, RESTYPE): cls = self.callable_trait[0] @@ -67,34 +70,8 @@ return constr(FT, name, **kwds) def getconcretetype(self, v): - """Helper called by getcallable() to get the conrete type of a variable -in a graph.""" - raise NotImplementedError() - - -class LowLevelTypeSystem(TypeSystem): - name = "lltypesystem" - callable_trait = (lltype.FuncType, lltype.functionptr) - - def derefType(self, T): - assert isinstance(T, lltype.Ptr) - return T.TO - - def deref(self, obj): - assert isinstance(lltype.typeOf(obj), lltype.Ptr) - return obj._obj - - def check_null(self, repr, hop): - # None is a nullptr, which is false; everything else is true. 
- vlist = hop.inputargs(repr) - return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - - def getconcretetype(self, v): return v.concretetype - def null_callable(self, T): - return lltype.nullptr(T.TO) - def generic_is(self, robj1, robj2, hop): roriginal1 = robj1 roriginal2 = robj2 @@ -113,7 +90,7 @@ v_list = hop.inputargs(robj1, robj2) return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) + # All typesystems are singletons LowLevelTypeSystem.instance = LowLevelTypeSystem() - getfunctionptr = LowLevelTypeSystem.instance.getcallable From noreply at buildbot.pypy.org Sat May 24 19:21:59 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 19:21:59 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140524172159.10D821C003C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71698:bddbc93c6eab Date: 2014-05-24 12:21 -0500 http://bitbucket.org/pypy/pypy/changeset/bddbc93c6eab/ Log: merged upstream diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -748,11 +748,11 @@ self.lockcounter = 0 def lock_held_by_someone_else(self): - return self.lockowner is not None and not self.lock_held() + me = self.space.getexecutioncontext() # used as thread ident + return self.lockowner is not None and self.lockowner is not me - def lock_held(self): - me = self.space.getexecutioncontext() # used as thread ident - return self.lockowner is me + def lock_held_by_anyone(self): + return self.lockowner is not None def acquire_lock(self): # this function runs with the GIL acquired so there is no race diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -165,7 +165,7 @@ def lock_held(space): if space.config.objspace.usemodules.thread: - return space.wrap(importing.getimportlock(space).lock_held()) + return 
space.wrap(importing.getimportlock(space).lock_held_by_anyone()) else: return space.w_False diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -15,6 +15,7 @@ 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', + 'frombuffer': 'ctors.frombuffer', 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop @@ -191,3 +192,62 @@ return _fromstring_bin(space, s, count, length, dtype) else: return _fromstring_text(space, s, count, sep, length, dtype) + + +def _getbuffer(space, w_buffer): + try: + return space.writebuf_w(w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + return space.readbuf_w(w_buffer) + + + at unwrap_spec(count=int, offset=int) +def frombuffer(space, w_buffer, w_dtype=None, count=-1, offset=0): + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero in type") + + try: + buf = _getbuffer(space, w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + w_buffer = space.getattr(w_buffer, space.wrap('__buffer__')) + buf = _getbuffer(space, w_buffer) + + ts = buf.getlength() + if offset < 0 or offset > ts: + raise oefmt(space.w_ValueError, + "offset must be non-negative and no greater 
than " + "buffer length (%d)", ts) + + s = ts - offset + if offset: + buf = SubBuffer(buf, offset, s) + + n = count + itemsize = dtype.elsize + assert itemsize > 0 + if n < 0: + if s % itemsize != 0: + raise oefmt(space.w_ValueError, + "buffer size must be a multiple of element size") + n = s / itemsize + else: + if s < n * itemsize: + raise oefmt(space.w_ValueError, + "buffer is smaller than requested size") + + try: + storage = buf.get_raw_address() + except ValueError: + a = W_NDimArray.from_shape(space, [n], dtype=dtype) + loop.fromstring_loop(space, a, dtype, itemsize, buf.as_str()) + return a + else: + writable = not buf.readonly + return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype, + w_base=w_buffer, writable=writable) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3132,6 +3132,8 @@ class AppTestSupport(BaseNumpyAppTest): + spaceconfig = {'usemodules': ['micronumpy', 'array']} + def setup_class(cls): import struct BaseNumpyAppTest.setup_class.im_func(cls) @@ -3142,6 +3144,44 @@ cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) + def test_frombuffer(self): + import numpy as np + exc = raises(AttributeError, np.frombuffer, None) + assert str(exc.value) == "'NoneType' object has no attribute '__buffer__'" + exc = raises(AttributeError, np.frombuffer, memoryview(self.data)) + assert str(exc.value) == "'memoryview' object has no attribute '__buffer__'" + exc = raises(ValueError, np.frombuffer, self.data, 'S0') + assert str(exc.value) == "itemsize cannot be zero in type" + exc = raises(ValueError, np.frombuffer, self.data, offset=-1) + assert str(exc.value) == "offset must be non-negative and no greater than buffer length (32)" + exc = raises(ValueError, np.frombuffer, self.data, count=100) + assert str(exc.value) == 
"buffer is smaller than requested size" + for data in [self.data, buffer(self.data)]: + a = np.frombuffer(data) + for i in range(4): + assert a[i] == i + 1 + + import array + data = array.array('c', 'testing') + a = np.frombuffer(data, 'c') + assert a.base is data + a[2] = 'Z' + assert data.tostring() == 'teZting' + + data = buffer(data) + a = np.frombuffer(data, 'c') + assert a.base is data + exc = raises(ValueError, "a[2] = 'Z'") + assert str(exc.value) == "assignment destination is read-only" + + class A(object): + __buffer__ = 'abc' + + data = A() + a = np.frombuffer(data, 'c') + #assert a.base is data.__buffer__ + assert a.tostring() == 'abc' + def test_fromstring(self): import sys from numpypy import fromstring, dtype diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -62,6 +62,28 @@ self.waitfor(lambda: done) assert done + def test_lock_held_by_another_thread(self): + import thread, imp + lock_held = thread.allocate_lock() + test_complete = thread.allocate_lock() + lock_released = thread.allocate_lock() + def other_thread(): + imp.acquire_lock() # 3 + assert imp.lock_held() + lock_held.release() # 4 + test_complete.acquire() # 7 + imp.release_lock() # 8 + lock_released.release() # 9 + lock_held.acquire() + test_complete.acquire() + lock_released.acquire() + # + thread.start_new_thread(other_thread, ()) # 1 + lock_held.acquire() # 2 + assert imp.lock_held() # 5 + test_complete.release() # 6 + lock_released.acquire() # 10 + class TestImportLock: def test_lock(self, space, monkeypatch): from pypy.module.imp.importing import getimportlock, importhook From noreply at buildbot.pypy.org Sat May 24 19:26:57 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 19:26:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed two pieces of pointless indirection from the typesystem 
abstraction Message-ID: <20140524172657.169B61C003C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71699:9e4c880da845 Date: 2014-05-24 12:26 -0500 http://bitbucket.org/pypy/pypy/changeset/9e4c880da845/ Log: Removed two pieces of pointless indirection from the typesystem abstraction diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -643,7 +643,7 @@ return frame.eval() def op_direct_call(self, f, *args): - FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f)) + FTYPE = lltype.typeOf(f).TO return self.perform_call(f, FTYPE.ARGS, args) def op_indirect_call(self, f, *args): diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -10,11 +10,6 @@ __metaclass__ = extendabletype name = "lltypesystem" - callable_trait = (lltype.FuncType, lltype.functionptr) - - def derefType(self, T): - assert isinstance(T, lltype.Ptr) - return T.TO def deref(self, obj): assert isinstance(lltype.typeOf(obj), lltype.Ptr) @@ -29,8 +24,7 @@ return lltype.nullptr(T.TO) def getcallabletype(self, ARGS, RESTYPE): - cls = self.callable_trait[0] - return cls(ARGS, RESTYPE) + return lltype.FuncType(ARGS, RESTYPE) def getcallable(self, graph, getconcretetype=None): """Return callable given a Python function.""" @@ -39,9 +33,7 @@ llinputs = [getconcretetype(v) for v in graph.getargs()] lloutput = getconcretetype(graph.getreturnvar()) - typ, constr = self.callable_trait - - FT = typ(llinputs, lloutput) + FT = lltype.FuncType(llinputs, lloutput) name = graph.name if hasattr(graph, 'func') and callable(graph.func): # the Python function object can have _llfnobjattrs_, specifying @@ -58,16 +50,14 @@ # _callable is normally graph.func, but can be overridden: # see fakeimpl in extfunc.py _callable = fnobjattrs.pop('_callable', graph.func) - return constr(FT, name, graph = graph, _callable = 
_callable, - **fnobjattrs) + return lltype.functionptr(FT, name, graph = graph, + _callable = _callable, **fnobjattrs) else: - return constr(FT, name, graph = graph) + return lltype.functionptr(FT, name, graph = graph) def getexternalcallable(self, ll_args, ll_result, name, **kwds): - typ, constr = self.callable_trait - - FT = typ(ll_args, ll_result) - return constr(FT, name, **kwds) + FT = lltype.FuncType(ll_args, ll_result) + return lltype.functionptr(FT, name, **kwds) def getconcretetype(self, v): return v.concretetype From noreply at buildbot.pypy.org Sat May 24 19:29:12 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 19:29:12 +0200 (CEST) Subject: [pypy-commit] pypy default: removed an unused function Message-ID: <20140524172912.3EBC51C003C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71700:aa1f312b6444 Date: 2014-05-24 12:28 -0500 http://bitbucket.org/pypy/pypy/changeset/aa1f312b6444/ Log: removed an unused function diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -23,9 +23,6 @@ def null_callable(self, T): return lltype.nullptr(T.TO) - def getcallabletype(self, ARGS, RESTYPE): - return lltype.FuncType(ARGS, RESTYPE) - def getcallable(self, graph, getconcretetype=None): """Return callable given a Python function.""" if getconcretetype is None: From noreply at buildbot.pypy.org Sat May 24 19:32:13 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 19:32:13 +0200 (CEST) Subject: [pypy-commit] pypy default: unused local var Message-ID: <20140524173213.407F81C003C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71701:8f6c7df80cdc Date: 2014-05-24 12:31 -0500 http://bitbucket.org/pypy/pypy/changeset/8f6c7df80cdc/ Log: unused local var diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -53,7 
+53,7 @@ self._initialized = setupstate.INPROGRESS try: self._setup_repr() - except TyperError, e: + except TyperError: self._initialized = setupstate.BROKEN raise else: From noreply at buildbot.pypy.org Sat May 24 20:26:03 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 20:26:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed pointless singleton Message-ID: <20140524182603.356D41D2CDF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71702:88669e5d7fe2 Date: 2014-05-24 13:25 -0500 http://bitbucket.org/pypy/pypy/changeset/88669e5d7fe2/ Log: Removed pointless singleton diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -38,7 +38,7 @@ def __init__(self, annotator): self.annotator = annotator self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - self.type_system = LowLevelTypeSystem.instance + self.type_system = LowLevelTypeSystem() self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -1,14 +1,11 @@ """typesystem.py -- Typesystem-specific operations for RTyper.""" -from rpython.tool.pairtype import extendabletype - from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.error import TyperError + class LowLevelTypeSystem(object): - __metaclass__ = extendabletype - name = "lltypesystem" def deref(self, obj): @@ -24,41 +21,12 @@ return lltype.nullptr(T.TO) def getcallable(self, graph, getconcretetype=None): - """Return callable given a Python function.""" - if getconcretetype is None: - getconcretetype = self.getconcretetype - llinputs = [getconcretetype(v) for v in graph.getargs()] - lloutput = getconcretetype(graph.getreturnvar()) - - FT = lltype.FuncType(llinputs, lloutput) - name = graph.name - if hasattr(graph, 'func') and 
callable(graph.func): - # the Python function object can have _llfnobjattrs_, specifying - # attributes that are forced upon the functionptr(). The idea - # for not passing these extra attributes as arguments to - # getcallable() itself is that multiple calls to getcallable() - # for the same graph should return equal functionptr() objects. - if hasattr(graph.func, '_llfnobjattrs_'): - fnobjattrs = graph.func._llfnobjattrs_.copy() - # can specify a '_name', but use graph.name by default - name = fnobjattrs.pop('_name', name) - else: - fnobjattrs = {} - # _callable is normally graph.func, but can be overridden: - # see fakeimpl in extfunc.py - _callable = fnobjattrs.pop('_callable', graph.func) - return lltype.functionptr(FT, name, graph = graph, - _callable = _callable, **fnobjattrs) - else: - return lltype.functionptr(FT, name, graph = graph) + return getfunctionptr(graph, getconcretetype) def getexternalcallable(self, ll_args, ll_result, name, **kwds): FT = lltype.FuncType(ll_args, ll_result) return lltype.functionptr(FT, name, **kwds) - def getconcretetype(self, v): - return v.concretetype - def generic_is(self, robj1, robj2, hop): roriginal1 = robj1 roriginal2 = robj2 @@ -78,6 +46,35 @@ return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) -# All typesystems are singletons -LowLevelTypeSystem.instance = LowLevelTypeSystem() -getfunctionptr = LowLevelTypeSystem.instance.getcallable +def _getconcretetype(v): + return v.concretetype + +def getfunctionptr(graph, getconcretetype=None): + """Return callable given a Python function.""" + if getconcretetype is None: + getconcretetype = _getconcretetype + llinputs = [getconcretetype(v) for v in graph.getargs()] + lloutput = getconcretetype(graph.getreturnvar()) + + FT = lltype.FuncType(llinputs, lloutput) + name = graph.name + if hasattr(graph, 'func') and callable(graph.func): + # the Python function object can have _llfnobjattrs_, specifying + # attributes that are forced upon the functionptr(). 
The idea + # for not passing these extra attributes as arguments to + # getcallable() itself is that multiple calls to getcallable() + # for the same graph should return equal functionptr() objects. + if hasattr(graph.func, '_llfnobjattrs_'): + fnobjattrs = graph.func._llfnobjattrs_.copy() + # can specify a '_name', but use graph.name by default + name = fnobjattrs.pop('_name', name) + else: + fnobjattrs = {} + # _callable is normally graph.func, but can be overridden: + # see fakeimpl in extfunc.py + _callable = fnobjattrs.pop('_callable', graph.func) + return lltype.functionptr(FT, name, graph = graph, + _callable = _callable, **fnobjattrs) + else: + return lltype.functionptr(FT, name, graph = graph) + From noreply at buildbot.pypy.org Sat May 24 21:02:25 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 24 May 2014 21:02:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed another indirection on LowLevelTypeSystem Message-ID: <20140524190225.238971C0109@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71705:71a9f28d192f Date: 2014-05-24 13:58 -0500 http://bitbucket.org/pypy/pypy/changeset/71a9f28d192f/ Log: Removed another indirection on LowLevelTypeSystem diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -9,6 +9,7 @@ QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.typesystem import getfunctionptr from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer from rpython.translator.backendopt.graphanalyze import DependencyTracker @@ -168,7 +169,7 @@ because it is not needed there; it is only used by the blackhole interp to really do the call corresponding to 'inline_call' ops. 
""" - fnptr = self.rtyper.type_system.getcallable(graph) + fnptr = getfunctionptr(graph) FUNC = lltype.typeOf(fnptr).TO assert self.rtyper.type_system.name == "lltypesystem" fnaddr = llmemory.cast_ptr_to_adr(fnptr) diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -1,10 +1,12 @@ import py + from rpython.flowspace.model import SpaceOperation, Constant, Variable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.unsimplify import varoftype from rpython.rlib import jit +from rpython.jit.codewriter import support, call from rpython.jit.codewriter.call import CallControl -from rpython.jit.codewriter import support + class FakePolicy: def look_inside_graph(self, graph): @@ -151,18 +153,19 @@ # ____________________________________________________________ -def test_get_jitcode(): +def test_get_jitcode(monkeypatch): from rpython.jit.codewriter.test.test_flatten import FakeCPU class FakeRTyper: class annotator: translator = None class type_system: name = 'lltypesystem' - @staticmethod - def getcallable(graph): - F = lltype.FuncType([], lltype.Signed) - return lltype.functionptr(F, 'bar') - # + + def getfunctionptr(graph): + F = lltype.FuncType([], lltype.Signed) + return lltype.functionptr(F, 'bar') + + monkeypatch.setattr(call, 'getfunctionptr', getfunctionptr) cc = CallControl(FakeCPU(FakeRTyper())) class somegraph: name = "foo" diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -13,6 +13,7 @@ AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable) +from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pairtype @@ -218,7 +219,7 @@ 
links[-1].llexitcase = chr(i) startblock.closeblock(*links) self.rtyper.annotator.translator.graphs.append(graph) - ll_ret = self.rtyper.type_system.getcallable(graph) + ll_ret = getfunctionptr(graph) #FTYPE = FuncType c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) return c_ret diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -26,7 +26,7 @@ Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError -from rpython.rtyper.typesystem import LowLevelTypeSystem +from rpython.rtyper.typesystem import LowLevelTypeSystem, getfunctionptr from rpython.rtyper.normalizecalls import perform_normalizations from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block @@ -600,7 +600,7 @@ def getconcretetype(v): return self.bindingrepr(v).lowleveltype - return self.type_system.getcallable(graph, getconcretetype) + return getfunctionptr(graph, getconcretetype) def annotate_helper(self, ll_function, argtypes): """Annotate the given low-level helper function and return its graph diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -20,9 +20,6 @@ def null_callable(self, T): return lltype.nullptr(T.TO) - def getcallable(self, graph, getconcretetype=None): - return getfunctionptr(graph, getconcretetype) - def getexternalcallable(self, ll_args, ll_result, name, **kwds): FT = lltype.FuncType(ll_args, ll_result) return lltype.functionptr(FT, name, **kwds) @@ -49,6 +46,7 @@ def _getconcretetype(v): return v.concretetype + def getfunctionptr(graph, getconcretetype=None): """Return callable given a Python function.""" if getconcretetype is None: From noreply at buildbot.pypy.org Sat May 24 21:02:26 2014 From: noreply at buildbot.pypy.org 
(alex_gaynor) Date: Sat, 24 May 2014 21:02:26 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140524190226.458051C0109@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r71706:eb63b07ea99a Date: 2014-05-24 14:01 -0500 http://bitbucket.org/pypy/pypy/changeset/eb63b07ea99a/ Log: merged upstream diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -22,6 +22,8 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET +from rpython.rlib import jit + def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') @@ -592,6 +594,7 @@ addrlen_p[0] = rffi.cast(_c.socklen_t, maxlen) return addr, addr.addr_p, addrlen_p + @jit.dont_look_inside def accept(self): """Wait for an incoming connection. Return (new socket fd, client address).""" @@ -724,6 +727,7 @@ return make_socket(fd, self.family, self.type, self.proto, SocketClass=SocketClass) + @jit.dont_look_inside def getpeername(self): """Return the address of the remote endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -738,6 +742,7 @@ address.addrlen = rffi.cast(lltype.Signed, addrlen) return address + @jit.dont_look_inside def getsockname(self): """Return the address of the local endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -752,6 +757,7 @@ address.addrlen = rffi.cast(lltype.Signed, addrlen) return address + @jit.dont_look_inside def getsockopt(self, level, option, maxlen): buf = mallocbuf(maxlen) try: @@ -771,6 +777,7 @@ lltype.free(buf, flavor='raw') return result + @jit.dont_look_inside def getsockopt_int(self, level, option): flag_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: @@ -828,6 +835,7 @@ rwbuffer.setslice(0, buf) return len(buf) + @jit.dont_look_inside def recvfrom(self, buffersize, flags=0): """Like recv(buffersize, flags) but also 
return the sender's address.""" From noreply at buildbot.pypy.org Sat May 24 21:17:23 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:23 +0200 (CEST) Subject: [pypy-commit] pypy packaging: cleanup, try to create LICENSE on darwin Message-ID: <20140524191723.314FB1C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: packaging Changeset: r71707:b06df720007f Date: 2014-05-22 22:41 +0300 http://bitbucket.org/pypy/pypy/changeset/b06df720007f/ Log: cleanup, try to create LICENSE on darwin diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -1,9 +1,9 @@ #!/usr/bin/env python -""" A sample script that packages PyPy, provided that it's already built. +""" packages PyPy, provided that it's already built. It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py --base-dir pypy-base-dir [--options] + package.py [--options] Usually you would do: package.py --version-name pypy-VER-PLATFORM The output is found in the directory from --builddir, @@ -51,8 +51,6 @@ os.system("chmod -R g-w %s" % dirname) def generate_license_linux(base_file, options): - # We don't actually ship binaries with the pypy archive, - # so no extra license needed? with open(base_file) as fid: txt = fid.read() searches = [("bzip2","libbz2-*", "copyright", '---------'), @@ -81,20 +79,9 @@ txt += line if len(line.strip())<1: txt += '\n' + txt += third_party_header # Do something for gdbm, which is GPL - txt += '''\n\nLicenses and Acknowledgements for Incorporated Software -======================================================= - -This section is an incomplete, but growing list of licenses and acknowledgements -for third-party software incorporated in the PyPy distribution. - -''' - txt += '''gdbm ----- - -The gdbm module includes code from gdbm.h, which is distributed under the terms -of the GPL license version 2 or any later version. 
-''' + txt += gdbm_bit return txt def generate_license_windows(base_file, options): @@ -117,10 +104,21 @@ txt += fid.read() return txt +def generate_license_darwin(base_file, options): + # where are copyright files on macos? + try: + return generate_license_linux(base_file, options) + except: + import traceback; traceback.print_exc() + pass + with open(base_file) as fid: + txt = fid.read() + return txt + if sys.platform == 'win32': generate_license = generate_license_windows elif sys.platform == 'darwin': - generate_license = generate_license_linux + generate_license = generate_license_darwin else: generate_license = generate_license_linux @@ -359,6 +357,23 @@ assert '/' not in options.pypy_c return create_package(basedir, options) + +third_party_header = '''\n\nLicenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and acknowledgements +for third-party software incorporated in the PyPy distribution. + +''' + +gdbm_bit = '''gdbm +---- + +The gdbm module includes code from gdbm.h, which is distributed under the terms +of the GPL license version 2 or any later version. 
+''' + + if __name__ == '__main__': import sys package(*sys.argv[1:]) From noreply at buildbot.pypy.org Sat May 24 21:17:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:24 +0200 (CEST) Subject: [pypy-commit] pypy win32-stdlib: close abandoned branch Message-ID: <20140524191724.76CCD1C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-stdlib Changeset: r71708:ec4a79deebf2 Date: 2014-05-22 22:53 +0300 http://bitbucket.org/pypy/pypy/changeset/ec4a79deebf2/ Log: close abandoned branch From noreply at buildbot.pypy.org Sat May 24 21:17:25 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:25 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-argminmax: close abandoned branch Message-ID: <20140524191725.B891A1C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpypy-argminmax Changeset: r71709:871a521b6e5c Date: 2014-05-22 22:54 +0300 http://bitbucket.org/pypy/pypy/changeset/871a521b6e5c/ Log: close abandoned branch From noreply at buildbot.pypy.org Sat May 24 21:17:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:26 +0200 (CEST) Subject: [pypy-commit] pypy win32-distutils: close abandoned branch Message-ID: <20140524191726.E6EB01C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-distutils Changeset: r71710:0d7cae3d25d0 Date: 2014-05-22 22:56 +0300 http://bitbucket.org/pypy/pypy/changeset/0d7cae3d25d0/ Log: close abandoned branch From noreply at buildbot.pypy.org Sat May 24 21:17:28 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:28 +0200 (CEST) Subject: [pypy-commit] pypy python-numpy: close abandoned branch Message-ID: <20140524191728.1F1AF1C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: python-numpy Changeset: r71711:a0f336f2c38d Date: 2014-05-22 22:58 +0300 http://bitbucket.org/pypy/pypy/changeset/a0f336f2c38d/ Log: close abandoned branch From noreply at buildbot.pypy.org Sat May 24 21:17:29 2014 From: 
noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:29 +0200 (CEST) Subject: [pypy-commit] pypy mtrand: close abandoned branch Message-ID: <20140524191729.60E631C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: mtrand Changeset: r71712:66b52d5145a4 Date: 2014-05-22 23:02 +0300 http://bitbucket.org/pypy/pypy/changeset/66b52d5145a4/ Log: close abandoned branch From noreply at buildbot.pypy.org Sat May 24 21:17:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:30 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-best_base: reclose merged branch Message-ID: <20140524191730.912B81C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: cpyext-best_base Changeset: r71713:af184ac9d2cd Date: 2014-05-22 23:04 +0300 http://bitbucket.org/pypy/pypy/changeset/af184ac9d2cd/ Log: reclose merged branch From noreply at buildbot.pypy.org Sat May 24 21:17:31 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:31 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: merge closed win32-stdlib Message-ID: <20140524191731.B11871C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71714:6d154eeec77a Date: 2014-05-22 23:07 +0300 http://bitbucket.org/pypy/pypy/changeset/6d154eeec77a/ Log: merge closed win32-stdlib From noreply at buildbot.pypy.org Sat May 24 21:17:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:32 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: merge closed numpypy-argminmax Message-ID: <20140524191732.B89551C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71715:8552c7299027 Date: 2014-05-22 23:07 +0300 http://bitbucket.org/pypy/pypy/changeset/8552c7299027/ Log: merge closed numpypy-argminmax From noreply at buildbot.pypy.org Sat May 24 21:17:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:33 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: 
merge closed win32-distutils Message-ID: <20140524191733.BC0F91C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71716:2064314495e5 Date: 2014-05-22 23:08 +0300 http://bitbucket.org/pypy/pypy/changeset/2064314495e5/ Log: merge closed win32-distutils From noreply at buildbot.pypy.org Sat May 24 21:17:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:34 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: merge closed python-numpy Message-ID: <20140524191734.C4AE01C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71717:669620b135e2 Date: 2014-05-22 23:08 +0300 http://bitbucket.org/pypy/pypy/changeset/669620b135e2/ Log: merge closed python-numpy From noreply at buildbot.pypy.org Sat May 24 21:17:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:35 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: merge closed mtrand Message-ID: <20140524191735.D29831C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71718:32a480d45d12 Date: 2014-05-22 23:08 +0300 http://bitbucket.org/pypy/pypy/changeset/32a480d45d12/ Log: merge closed mtrand From noreply at buildbot.pypy.org Sat May 24 21:17:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:36 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: merge closed cpyext-best_base Message-ID: <20140524191736.E4C371C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: closed-branches Changeset: r71719:dcbfb4461e2f Date: 2014-05-22 23:09 +0300 http://bitbucket.org/pypy/pypy/changeset/dcbfb4461e2f/ Log: merge closed cpyext-best_base From noreply at buildbot.pypy.org Sat May 24 21:17:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 21:17:37 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: reclose this branch Message-ID: <20140524191737.E4E5A1C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip 
Branch: closed-branches Changeset: r71720:6eaf748cca21 Date: 2014-05-22 23:10 +0300 http://bitbucket.org/pypy/pypy/changeset/6eaf748cca21/ Log: reclose this branch From noreply at buildbot.pypy.org Sat May 24 22:11:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 22:11:42 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: cleanup Message-ID: <20140524201142.F0C401C003C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r71721:9a2be21902ab Date: 2014-05-24 23:09 +0300 http://bitbucket.org/pypy/pypy/changeset/9a2be21902ab/ Log: cleanup diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -88,7 +88,7 @@ assert np.FLOATING_POINT_SUPPORT == 1 def test_ufunc_instance(self): - from numpypy import add, ufunc + from numpy import add, ufunc assert isinstance(add, ufunc) assert repr(add) == "" @@ -96,7 +96,7 @@ assert add.__name__ == 'add' def test_ufunc_attrs(self): - from numpypy import add, multiply, sin + from numpy import add, multiply, sin assert add.identity == 0 assert multiply.identity == 1 @@ -107,7 +107,7 @@ assert sin.nin == 1 def test_wrong_arguments(self): - from numpypy import add, sin + from numpy import add, sin raises(ValueError, add, 1) raises(TypeError, add, 1, 2, 3) @@ -115,14 +115,14 @@ raises(ValueError, sin) def test_single_item(self): - from numpypy import negative, sign, minimum + from numpy import negative, sign, minimum assert negative(5.0) == -5.0 assert sign(-0.0) == 0.0 assert minimum(2.0, 3.0) == 2.0 def test_sequence(self): - from numpypy import array, ndarray, negative, minimum + from numpy import array, ndarray, negative, minimum a = array(range(3)) b = [2.0, 1.0, 0.0] c = 1.0 @@ -161,7 +161,7 @@ # and those that are uncallable can be accounted for. 
# test on the four base-class dtypes: int, bool, float, complex # We need this test since they have no common base class. - import numpypy as np + import numpy as np def find_uncallable_ufuncs(dtype): uncallable = set() array = np.array(1, dtype) @@ -187,12 +187,12 @@ 'copysign', 'signbit', 'ceil', 'floor', 'trunc']) def test_int_only(self): - from numpypy import bitwise_and, array + from numpy import bitwise_and, array a = array(1.0) raises(TypeError, bitwise_and, a, a) def test_negative(self): - from numpypy import array, negative + from numpy import array, negative a = array([-5.0, 0.0, 1.0]) b = negative(a) @@ -211,7 +211,7 @@ assert (b == [[-2, -4], [-6, -8]]).all() def test_abs(self): - from numpypy import array, absolute + from numpy import array, absolute a = array([-5.0, -0.0, 1.0]) b = absolute(a) @@ -219,7 +219,7 @@ assert b[i] == abs(a[i]) def test_add(self): - from numpypy import array, add + from numpy import array, add a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -228,7 +228,7 @@ assert c[i] == a[i] + b[i] def test_divide(self): - from numpypy import array, divide + from numpy import array, divide a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -240,8 +240,7 @@ def test_true_divide(self): import math - from numpypy import array, true_divide - import math + from numpy import array, true_divide a = array([0, 1, 2, 3, 4, 1, -1]) b = array([4, 4, 4, 4, 4, 0, 0]) @@ -251,7 +250,7 @@ assert math.isnan(true_divide(0, 0)) def test_fabs(self): - from numpypy import array, fabs + from numpy import array, fabs from math import fabs as math_fabs, isnan a = array([-5.0, -0.0, 1.0]) @@ -263,7 +262,7 @@ assert isnan(fabs(float('nan'))) def test_fmax(self): - from numpypy import fmax, array + from numpy import fmax, array import math nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -282,7 +281,7 @@ assert math.copysign(1., fmax(nnan, nan)) == math.copysign(1., nnan) def test_fmin(self): - from numpypy 
import fmin, array + from numpy import fmin import math nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -300,7 +299,7 @@ assert math.copysign(1., fmin(nnan, nan)) == math.copysign(1., nnan) def test_fmod(self): - from numpypy import fmod + from numpy import fmod import math assert fmod(-1e-100, 1e100) == -1e-100 @@ -361,7 +360,7 @@ assert b.imag == 0 def test_multiply(self): - from numpypy import array, multiply, arange + from numpy import array, multiply, arange a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -373,7 +372,7 @@ assert(multiply.reduce(a) == array([0, 3640, 12320])).all() def test_rint(self): - from numpypy import array, dtype, rint, isnan + from numpy import array, dtype, rint, isnan import sys nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf') @@ -392,7 +391,7 @@ assert rint(sys.maxint) > 0.0 def test_sign(self): - from numpypy import array, sign, dtype + from numpy import array, sign, dtype reference = [-1.0, 0.0, 0.0, 1.0] a = array([-5.0, -0.0, 0.0, 6.0]) @@ -443,7 +442,7 @@ assert (b == reference).all() def test_subtract(self): - from numpypy import array, subtract + from numpy import array, subtract a = array([-5.0, -0.0, 1.0]) b = array([ 3.0, -2.0,-3.0]) @@ -452,7 +451,7 @@ assert c[i] == a[i] - b[i] def test_floorceiltrunc(self): - from numpypy import array, floor, ceil, trunc + from numpy import array, floor, ceil, trunc import math ninf, inf = float("-inf"), float("inf") a = array([ninf, -1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5, inf]) @@ -464,7 +463,7 @@ assert all([math.copysign(1, f(-abs(float("nan")))) == -1 for f in floor, ceil, trunc]) def test_round(self): - from numpypy import array, dtype + from numpy import array, dtype ninf, inf = float("-inf"), float("inf") a = array([ninf, -1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5, inf]) assert ([ninf, -1.0, -2.0, -1.0, 0.0, 1.0, 1.0, 0.0, inf] == a.round()).all() @@ -480,7 +479,7 @@ assert (c.round(0) == [10.+12.j, -15-100j, 
0+11j]).all() def test_copysign(self): - from numpypy import array, copysign + from numpy import array, copysign reference = [5.0, -0.0, 0.0, -6.0] a = array([-5.0, 0.0, 0.0, 6.0]) @@ -496,7 +495,7 @@ def test_exp(self): import math - from numpypy import array, exp + from numpy import array, exp a = array([-5.0, -0.0, 0.0, 12345678.0, float("inf"), -float('inf'), -12343424.0]) @@ -510,7 +509,7 @@ def test_exp2(self): import math - from numpypy import array, exp2 + from numpy import array, exp2 inf = float('inf') ninf = -float('inf') nan = float('nan') @@ -529,7 +528,7 @@ def test_expm1(self): import math, cmath - from numpypy import array, expm1 + from numpy import array, expm1 inf = float('inf') ninf = -float('inf') nan = float('nan') @@ -548,7 +547,7 @@ def test_sin(self): import math - from numpypy import array, sin + from numpy import array, sin a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2]) b = sin(a) @@ -561,7 +560,7 @@ def test_cos(self): import math - from numpypy import array, cos + from numpy import array, cos a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2]) b = cos(a) @@ -570,7 +569,7 @@ def test_tan(self): import math - from numpypy import array, tan + from numpy import array, tan a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2]) b = tan(a) @@ -579,7 +578,7 @@ def test_arcsin(self): import math - from numpypy import array, arcsin + from numpy import array, arcsin a = array([-1, -0.5, -0.33, 0, 0.33, 0.5, 1]) b = arcsin(a) @@ -593,7 +592,7 @@ def test_arccos(self): import math - from numpypy import array, arccos + from numpy import array, arccos a = array([-1, -0.5, -0.33, 0, 0.33, 0.5, 1]) b = arccos(a) @@ -607,7 +606,7 @@ def test_arctan(self): import math - from numpypy import array, arctan + from numpy import array, arctan a = array([-3, -2, -1, 0, 1, 2, 3, float('inf'), float('-inf')]) b = arctan(a) @@ -620,7 +619,7 @@ def test_arctan2(self): import math - from numpypy import array, arctan2 + from numpy import array, arctan2 
# From the numpy documentation assert ( @@ -635,7 +634,7 @@ def test_sinh(self): import math - from numpypy import array, sinh + from numpy import array, sinh a = array([-1, 0, 1, float('inf'), float('-inf')]) b = sinh(a) @@ -644,7 +643,7 @@ def test_cosh(self): import math - from numpypy import array, cosh + from numpy import array, cosh a = array([-1, 0, 1, float('inf'), float('-inf')]) b = cosh(a) @@ -653,7 +652,7 @@ def test_tanh(self): import math - from numpypy import array, tanh + from numpy import array, tanh a = array([-1, 0, 1, float('inf'), float('-inf')]) b = tanh(a) @@ -662,7 +661,7 @@ def test_arcsinh(self): import math - from numpypy import arcsinh + from numpy import arcsinh for v in [float('inf'), float('-inf'), 1.0, math.e]: assert math.asinh(v) == arcsinh(v) @@ -670,7 +669,7 @@ def test_arccosh(self): import math - from numpypy import arccosh + from numpy import arccosh for v in [1.0, 1.1, 2]: assert math.acosh(v) == arccosh(v) @@ -679,7 +678,7 @@ def test_arctanh(self): import math - from numpypy import arctanh + from numpy import arctanh for v in [.99, .5, 0, -.5, -.99]: assert math.atanh(v) == arctanh(v) @@ -690,7 +689,7 @@ def test_sqrt(self): import math - from numpypy import sqrt + from numpy import sqrt nan, inf = float("nan"), float("inf") data = [1, 2, 3, inf] @@ -701,7 +700,7 @@ def test_square(self): import math - from numpypy import square + from numpy import square nan, inf, ninf = float("nan"), float("inf"), float("-inf") @@ -714,7 +713,7 @@ def test_radians(self): import math - from numpypy import radians, array + from numpy import radians, array a = array([ -181, -180, -179, 181, 180, 179, @@ -727,7 +726,7 @@ def test_deg2rad(self): import math - from numpypy import deg2rad, array + from numpy import deg2rad, array a = array([ -181, -180, -179, 181, 180, 179, @@ -740,7 +739,7 @@ def test_degrees(self): import math - from numpypy import degrees, array + from numpy import degrees, array a = array([ -181, -180, -179, 181, 180, 179, 
@@ -753,7 +752,7 @@ def test_rad2deg(self): import math - from numpypy import rad2deg, array + from numpy import rad2deg, array a = array([ -181, -180, -179, 181, 180, 179, @@ -765,7 +764,7 @@ assert b[i] == math.degrees(a[i]) def test_reduce_errors(self): - from numpypy import sin, add, maximum, zeros + from numpy import sin, add, maximum, zeros raises(ValueError, sin.reduce, [1, 2, 3]) assert add.reduce(1) == 1 @@ -785,7 +784,7 @@ assert exc.value[0] == "'axis' entry is out of bounds" def test_reduce_1d(self): - from numpypy import array, add, maximum, less, float16, complex64 + from numpy import array, add, maximum, less, float16, complex64 assert less.reduce([5, 4, 3, 2, 1]) assert add.reduce([1, 2, 3]) == 6 @@ -800,21 +799,21 @@ assert type(add.reduce(array([True, False] * 200, dtype='complex64'))) is complex64 def test_reduceND(self): - from numpypy import add, arange + from numpy import add, arange a = arange(12).reshape(3, 4) assert (add.reduce(a, 0) == [12, 15, 18, 21]).all() assert (add.reduce(a, 1) == [6.0, 22.0, 38.0]).all() raises(ValueError, add.reduce, a, 2) def test_reduce_keepdims(self): - from numpypy import add, arange + from numpy import add, arange a = arange(12).reshape(3, 4) b = add.reduce(a, 0, keepdims=True) assert b.shape == (1, 4) assert (add.reduce(a, 0, keepdims=True) == [12, 15, 18, 21]).all() def test_bitwise(self): - from numpypy import bitwise_and, bitwise_or, bitwise_xor, arange, array + from numpy import bitwise_and, bitwise_or, bitwise_xor, arange, array a = arange(6).reshape(2, 3) assert (a & 1 == [[0, 1, 0], [1, 0, 1]]).all() assert (a & 1 == bitwise_and(a, 1)).all() @@ -824,7 +823,7 @@ raises(TypeError, 'array([1.0]) & 1') def test_unary_bitops(self): - from numpypy import bitwise_not, invert, array + from numpy import bitwise_not, invert, array a = array([1, 2, 3, 4]) assert (~a == [-2, -3, -4, -5]).all() assert (bitwise_not(a) == ~a).all() @@ -833,7 +832,7 @@ assert invert(False) == True def test_shift(self): - from numpypy 
import left_shift, right_shift, dtype + from numpy import left_shift, right_shift, dtype assert (left_shift([5, 1], [2, 13]) == [20, 2**13]).all() assert (right_shift(10, range(5)) == [10, 5, 2, 1, 0]).all() @@ -843,7 +842,7 @@ def test_comparisons(self): import operator - from numpypy import (equal, not_equal, less, less_equal, greater, + from numpy import (equal, not_equal, less, less_equal, greater, greater_equal, arange) for ufunc, func in [ @@ -872,7 +871,7 @@ assert val == False def test_count_nonzero(self): - from numpypy import count_nonzero + from numpy import count_nonzero assert count_nonzero(0) == 0 assert count_nonzero(1) == 1 assert count_nonzero([]) == 0 @@ -880,11 +879,11 @@ assert count_nonzero([[1, 2, 0], [1, 0, 2]]) == 4 def test_true_divide_2(self): - from numpypy import arange, array, true_divide + from numpy import arange, array, true_divide assert (true_divide(arange(3), array([2, 2, 2])) == array([0, 0.5, 1])).all() def test_isnan_isinf(self): - from numpypy import isnan, isinf, array, dtype + from numpy import isnan, isinf, array, dtype assert isnan(float('nan')) assert not isnan(3) assert not isinf(3) @@ -900,7 +899,7 @@ assert isinf(array([0.2])).dtype.kind == 'b' def test_logical_ops(self): - from numpypy import logical_and, logical_or, logical_xor, logical_not + from numpy import logical_and, logical_or, logical_xor, logical_not assert (logical_and([True, False , True, True], [1, 1, 3, 0]) == [True, False, True, False]).all() @@ -912,7 +911,7 @@ def test_logn(self): import math - from numpypy import log, log2, log10 + from numpy import log, log2, log10 for log_func, base in [(log, math.e), (log2, 2), (log10, 10)]: for v in [float('-nan'), float('-inf'), -1, float('nan')]: @@ -924,7 +923,7 @@ def test_log1p(self): import math - from numpypy import log1p + from numpy import log1p for v in [float('-nan'), float('-inf'), -2, float('nan')]: assert math.isnan(log1p(v)) @@ -935,7 +934,7 @@ def test_power_float(self): import math - from numpypy 
import power, array + from numpy import power, array a = array([1., 2., 3.]) b = power(a, 3) for i in range(len(a)): @@ -963,7 +962,7 @@ def test_power_int(self): import math - from numpypy import power, array + from numpy import power, array a = array([1, 2, 3]) b = power(a, 3) for i in range(len(a)): @@ -986,7 +985,7 @@ assert power(12345, -12345) == 0 def test_floordiv(self): - from numpypy import floor_divide, array + from numpy import floor_divide, array import math a = array([1., 2., 3., 4., 5., 6., 6.01]) b = floor_divide(a, 2.5) @@ -1010,7 +1009,7 @@ import math import sys float_max, float_min = sys.float_info.max, sys.float_info.min - from numpypy import logaddexp + from numpy import logaddexp # From the numpy documentation prob1 = math.log(1e-50) @@ -1036,7 +1035,7 @@ import math import sys float_max, float_min = sys.float_info.max, sys.float_info.min - from numpypy import logaddexp2 + from numpy import logaddexp2 log2 = math.log(2) # From the numpy documentation @@ -1060,7 +1059,7 @@ assert logaddexp2(float('inf'), float('inf')) == float('inf') def test_accumulate(self): - from numpypy import add, subtract, multiply, divide, arange, dtype + from numpy import add, subtract, multiply, divide, arange, dtype assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() assert (multiply.accumulate([2, 3, 5]) == [2, 6, 30]).all() a = arange(4).reshape(2,2) @@ -1082,7 +1081,7 @@ assert divide.accumulate([True]*200).dtype == dtype('int8') def test_noncommutative_reduce_accumulate(self): - import numpypy as np + import numpy as np tosubtract = np.arange(5) todivide = np.array([2.0, 0.5, 0.25]) assert np.subtract.reduce(tosubtract) == -10 @@ -1094,7 +1093,7 @@ def test_outer(self): import numpy as np - from numpypy import absolute + from numpy import absolute exc = raises(ValueError, np.absolute.outer, [-1, -2]) assert exc.value[0] == 'outer product only supported for binary functions' From noreply at buildbot.pypy.org Sat May 24 22:11:44 2014 From: noreply at 
buildbot.pypy.org (mattip) Date: Sat, 24 May 2014 22:11:44 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: add failing test Message-ID: <20140524201144.3E7F81C003C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r71722:86cd75baa6be Date: 2014-05-24 23:10 +0300 http://bitbucket.org/pypy/pypy/changeset/86cd75baa6be/ Log: add failing test diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -94,6 +94,14 @@ assert repr(add) == "" assert repr(ufunc) == "" assert add.__name__ == 'add' + raises(TypeError, ufunc) + + def test_frompyfunc(self): + from numpy import ufunc, frompyfunc, arange + myufunc = frompyfunc(int.__add__, 2, 1) + assert isinstance(add, ufunc) + res = myufunc(arange(10), arange(10)) + assert all(res == arange(10) + arange(10)) def test_ufunc_attrs(self): from numpy import add, multiply, sin From noreply at buildbot.pypy.org Sun May 25 04:16:36 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 25 May 2014 04:16:36 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: fix r_os_stat.py Message-ID: <20140525021637.0057E1C0109@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71723:8ca76f81be34 Date: 2014-05-25 03:14 +0100 http://bitbucket.org/pypy/pypy/changeset/8ca76f81be34/ Log: fix r_os_stat.py diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -12,7 +12,6 @@ from rpython.flowspace.model import Constant from rpython.flowspace.operation import op from rpython.tool.pairtype import pairtype -from rpython.rtyper.rtyper import HighLevelOp from rpython.rtyper.rmodel import Repr from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.error import TyperError @@ -36,10 +35,14 @@ def redispatch_getfield(self, hop, index): 
rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() spaceop = op.getitem(hop.args_v[0], Constant(index)) spaceop.result = hop.spaceop.result - hop2 = HighLevelOp(rtyper, spaceop, hop.exceptionlinks[:], hop.llops[:]) - hop2.setup() + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() def rtype_getattr(self, hop): @@ -86,10 +89,13 @@ def redispatch_getfield(self, hop, index): rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2 = HighLevelOp(rtyper, spaceop, hop.exceptionlinks[:], hop.llops[:]) - hop2.setup() + spaceop.result = hop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() def rtype_getattr(self, hop): From noreply at buildbot.pypy.org Sun May 25 04:42:09 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 25 May 2014 04:42:09 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: kill bookkeeper.build_args Message-ID: <20140525024209.766801C003C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71724:9d5ad8746f40 Date: 2014-05-25 03:41 +0100 http://bitbucket.org/pypy/pypy/changeset/9d5ad8746f40/ Log: kill bookkeeper.build_args diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -18,7 +18,7 @@ from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import simple_args, complex_args +from rpython.annotator.argument import simple_args from 
rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -103,8 +103,9 @@ self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): - self.consider_call_site_for_pbc(pbc, 'simple_call', - args_s, s_ImpossibleValue, None) + args = simple_args(args_s) + self.consider_call_site_for_pbc(pbc, args, + s_ImpossibleValue, None) self.emulated_pbc_calls = {} finally: self.leave() @@ -152,16 +153,16 @@ args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): s_result = binding(call_op.result, s_ImpossibleValue) - self.consider_call_site_for_pbc(s_callable, call_op.opname, args_s, + args = call_op.build_args(args_s) + self.consider_call_site_for_pbc(s_callable, args, s_result, call_op) - def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result, + def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) if not descs: return family = descs[0].getcallfamily() - args = self.build_args(opname, args_s) s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) @@ -562,12 +563,6 @@ assert self.annotator.binding(op.args[pos]) == s_type return op - def build_args(self, op, args_s): - if op == "simple_call": - return simple_args(args_s) - elif op == "call_args": - return complex_args(args_s) - def ondegenerated(self, what, s_value, where=None, called_from_graph=None): self.annotator.ondegenerated(what, s_value, where=where, called_from_graph=called_from_graph) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -14,6 +14,7 @@ SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc from rpython.annotator.model import SomeTuple +from rpython.annotator.argument import ArgumentsForTranslation from 
rpython.flowspace.specialcase import SPECIAL_CASES @@ -511,6 +512,9 @@ return sc(ctx, *args_w) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation(list(args_s)) + class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' @@ -529,6 +533,10 @@ "should not call %r with keyword arguments" % (fn,)) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation.fromshape(args_s[0].const, + list(args_s[1:])) + # Other functions that get directly translated to SpaceOperators func2op[type] = op.type diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -31,7 +31,7 @@ getrinputs(rtyper, graph), getrresult(rtyper, graph)) -def callparse(rtyper, graph, hop, opname, r_self=None): +def callparse(rtyper, graph, hop, r_self=None): """Parse the arguments of 'hop' when calling the given 'graph'. """ rinputs = getrinputs(rtyper, graph) @@ -43,6 +43,7 @@ else: start = 0 rinputs[0] = r_self + opname = hop.spaceop.opname if opname == "simple_call": arguments = ArgumentsForRtype(args_h(start)) elif opname == "call_args": diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -182,10 +182,10 @@ return self.convert_desc(funcdesc) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) def dispatcher(self, shape, index, argtypes, resulttype): key = shape, index, tuple(argtypes), resulttype @@ -223,9 +223,9 @@ c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) return c_ret - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = 
hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -233,7 +233,7 @@ row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness vlist = [hop.inputarg(self, arg=0)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() v_dispatcher = self.dispatcher(shape, index, [v.concretetype for v in vlist[1:]], rresult.lowleveltype) diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -300,14 +300,14 @@ return inputconst(typeOf(llfn), llfn) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -317,7 +317,7 @@ vfn = hop.inputarg(self, arg=0) vlist = [self.convert_to_concrete_llfn(vfn, shape, index, hop.llops)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() if isinstance(vlist[0], Constant): From noreply at buildbot.pypy.org Sun May 25 09:50:36 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 25 May 2014 09:50:36 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Stop setting backend to llvm in translate.py. 
The idea was to run the buildbots with the LLVM translation backend without modification. However, this doesn't work because the binary name 'pypy-c' is hardcoded in parts of the build configuration. Message-ID: <20140525075036.6C6D31C02D4@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71725:3d5cd3291a68 Date: 2014-05-25 09:49 +0200 http://bitbucket.org/pypy/pypy/changeset/3d5cd3291a68/ Log: Stop setting backend to llvm in translate.py. The idea was to run the buildbots with the LLVM translation backend without modification. However, this doesn't work because the binary name 'pypy-c' is hardcoded in parts of the build configuration. diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -109,7 +109,6 @@ opt_parser.disable_interspersed_args() config = get_combined_translation_config(translating=True) - config.translation.backend = 'llvm' to_optparse(config, parser=opt_parser, useoptions=['translation.*']) translateconfig = Config(translate_optiondescr) to_optparse(translateconfig, parser=opt_parser) From noreply at buildbot.pypy.org Sun May 25 17:23:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 May 2014 17:23:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Add comment inline Message-ID: <20140525152326.A8C651C048F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71726:6d4973cec392 Date: 2014-05-25 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/6d4973cec392/ Log: Add comment inline diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -23,6 +23,9 @@ from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET from rpython.rlib import jit +# Usage of @jit.dont_look_inside in this file is possibly temporary +# and only because some lltypes 
declared in _rsocket_rffi choke the +# JIT's codewriter right now (notably, FixedSizeArray). def mallocbuf(buffersize): From noreply at buildbot.pypy.org Sun May 25 20:13:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 25 May 2014 20:13:50 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Fix raw_load() operation with addr of type Signed. Message-ID: <20140525181350.E48FB1C02D4@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r71727:7f3ae0113be8 Date: 2014-05-25 20:13 +0200 http://bitbucket.org/pypy/pypy/changeset/7f3ae0113be8/ Log: Fix raw_load() operation with addr of type Signed. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1326,7 +1326,7 @@ t1 = self._tmp(PtrType(LLVMChar)) t2 = self._tmp(PtrType(LLVMChar)) t3 = self._tmp(PtrType(ptr_to)) - self.w('{t1.V} = bitcast {addr.TV} to {t1.T}'.format(**locals())) + self._cast(t1, addr) self.w('{t2.V} = getelementptr inbounds {t1.TV}, {offset.TV}' .format(**locals())) self.w('{t3.V} = bitcast {t2.TV} to {t3.T}'.format(**locals())) From noreply at buildbot.pypy.org Sun May 25 21:05:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 May 2014 21:05:03 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: A variant not using r15 at all, but an extra argument. Awful Hack Hack Message-ID: <20140525190503.03B451C13AA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71728:1254999ae792 Date: 2014-05-25 21:04 +0200 http://bitbucket.org/pypy/pypy/changeset/1254999ae792/ Log: A variant not using r15 at all, but an extra argument. Awful Hack Hack Hack for now. 
diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -21,6 +21,12 @@ lltype.Struct('rpy_shadowstack_s', hints={"external": "C", "c_name": "rpy_shadowstack_s"})) + def gct_gc_stack_bottom(self, hop): + self.ensure_ss_graph_marker(0) + block = self._transforming_graph.startblock + op = SpaceOperation('gc_stack_bottom', [], varoftype(lltype.Void)) + block.operations.insert(0, op) + def build_root_walker(self): return ShadowStackRootWalker(self) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -438,9 +438,6 @@ 'gc_gettypeptr_group': LLOp(canfold=True), 'get_member_index': LLOp(canfold=True), - 'getfield_exc_type': LLOp(sideeffects=False), - 'setfield_exc_type': LLOp(), - # __________ used by the JIT ________ 'jit_marker': LLOp(), diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -118,7 +118,7 @@ return node.gettype() elif isinstance(T, FuncType): resulttype = self.gettype(T.RESULT) - argtypes = [] + argtypes = ['struct rpy_shadowstack_s *rpy_shadowstack'] for i in range(len(T.ARGS)): if T.ARGS[i] is not Void: argtype = self.gettype(T.ARGS[i]) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -407,7 +407,8 @@ r = self.expr(op.result) return 'OP_CALL_ARGS((%s), %s);' % (', '.join(args), r) - def generic_call(self, FUNC, fnexpr, args_v, v_result, targets=None): + def generic_call(self, FUNC, fnexpr, args_v, v_result, targets=None, + to_internal=True): args = [] assert len(args_v) == len(FUNC.TO.ARGS) for v, ARGTYPE in zip(args_v, FUNC.TO.ARGS): @@ -418,6 +419,8 @@ 
# XXX is this still needed now that rctypes is gone if isinstance(ARGTYPE, ContainerType): args[-1] = '*%s' % (args[-1],) + if to_internal: + args.insert(0, 'rpy_shadowstack') line = '%s(%s);' % (fnexpr, ', '.join(args)) if self.lltypemap(v_result) is not Void: @@ -438,7 +441,8 @@ except AttributeError: targets = None return self.generic_call(fn.concretetype, self.expr(fn), - op.args[1:], op.result, targets) + op.args[1:], op.result, targets, + to_internal = targets is not None) def OP_INDIRECT_CALL(self, op): fn = op.args[0] diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -431,18 +431,22 @@ raise AssertionError(subopnum) return ' '.join(parts) - def OP_GC_STACK_BOTTOM(self, funcgen, op): - return 'pypy_asm_stack_bottom();' - - def OP_GC_STACK_TOP(self, funcgen, op): - return 'pypy_asm_stack_top();' - class ShadowStackFrameworkGcPolicy(BasicFrameworkGcPolicy): def gettransformer(self): from rpython.memory.gctransform import shadowstack return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) + def gc_startup_code(self): + fnptr = self.db.gctransformer.frameworkgc_setup_ptr.value + yield '%s(NULL);' % (self.db.get(fnptr),) + + def OP_GC_STACK_BOTTOM(self, funcgen, op): + return 'RPY_SS_STACK_BOTTOM();' + + def OP_GC_STACK_TOP(self, funcgen, op): + return 'RPY_SS_STACK_TOP();' + def OP_GC_SS_GRAPH_MARKER(self, funcgen, op): marker = funcgen.expr(op.result) count = op.args[0].value diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -782,6 +782,7 @@ incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') + fi.write('struct rpy_shadowstack_s;\n') # # Header diff --git a/rpython/translator/c/src/debug_traceback.c b/rpython/translator/c/src/debug_traceback.c --- 
a/rpython/translator/c/src/debug_traceback.c +++ b/rpython/translator/c/src/debug_traceback.c @@ -13,7 +13,7 @@ { int i; int skipping; - void *my_etype = RPyFetchExceptionType(); + void *my_etype = RPyFetchExceptionType(NULL); struct pypydtpos_s *location; void *etype; int has_loc; @@ -67,6 +67,6 @@ { pypy_debug_traceback_print(); fprintf(stderr, "Fatal RPython error: %s\n", - RPyFetchExceptionType()->ov_name->items); + RPyFetchExceptionType(NULL)->ov_name->items); abort(); } diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -31,6 +31,7 @@ char *errmsg; int i, exitcode; RPyListOfString *list; + struct rpy_shadowstack_s *rpy_shadowstack; #ifdef PYPY_USE_ASMGCC pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; @@ -49,23 +50,20 @@ errmsg = RPython_StartupCode(); if (errmsg) goto error; - pypy_asm_stack_bottom(); - list = _RPyListOfString_New(argc); - if (RPyExceptionOccurred()) goto memory_out; + RPY_SS_STACK_BOTTOM(); + list = _RPyListOfString_New(rpy_shadowstack, argc); + if (RPyExceptionOccurred(rpy_shadowstack)) goto memory_out; for (i=0; i 0 ? 
(char *)(marker + count) - 2 \ : marker[0].s -static inline void pypy_asm_stack_bottom(void) -{ - void *s = pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top; - rpy_shadowstack = s; -} +#define RPY_SS_STACK_BOTTOM() \ + rpy_shadowstack = \ + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top -static inline void pypy_asm_stack_top(void) -{ - void *s = rpy_shadowstack; - pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top = s; -} - -#define OP_GETFIELD_EXC_TYPE(r) \ - if (__builtin_expect(((Signed)rpy_shadowstack) & 1, 0)) { \ - r = (struct pypy_object_vtable0 *)(((char *)rpy_shadowstack) - 1); \ - if (!r) __builtin_unreachable(); \ - } \ - else { \ - r = NULL; \ - } -#define OP_SETFIELD_EXC_TYPE(x, r) \ - rpy_shadowstack = (x) ? ((char *)(x)) + 1 : NULL +#define RPY_SS_STACK_TOP() \ + pypy_g_rpython_memory_gctypelayout_GCData.gcd_inst_root_stack_top = \ + rpy_shadowstack #endif diff --git a/rpython/translator/c/src/rtyper.c b/rpython/translator/c/src/rtyper.c --- a/rpython/translator/c/src/rtyper.c +++ b/rpython/translator/c/src/rtyper.c @@ -37,10 +37,10 @@ } } -RPyString *RPyString_FromString(char *buf) +RPyString *RPyString_FromString(struct rpy_shadowstack_s *rpy_shadowstack, char *buf) { int length = strlen(buf); - RPyString *rps = RPyString_New(length); + RPyString *rps = RPyString_New(rpy_shadowstack, length); memcpy(rps->rs_chars.items, buf, length); return rps; } diff --git a/rpython/translator/c/src/rtyper.h b/rpython/translator/c/src/rtyper.h --- a/rpython/translator/c/src/rtyper.h +++ b/rpython/translator/c/src/rtyper.h @@ -11,4 +11,4 @@ char *RPyString_AsCharP(RPyString *rps); void RPyString_FreeCache(void); -RPyString *RPyString_FromString(char *buf); +RPyString *RPyString_FromString(struct rpy_shadowstack_s *rpy_shadowstack, char *buf); diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ 
b/rpython/translator/exceptiontransform.py @@ -67,19 +67,17 @@ self.c_n_i_error_ll_exc_type = constant_value(n_i_error_ll_exc_type) def rpyexc_occured(): - exc_type = lloperation.llop.getfield_exc_type( - self.lltype_of_exception_type) + exc_type = exc_data.exc_type return bool(exc_type) def rpyexc_fetch_type(): - return lloperation.llop.getfield_exc_type( - self.lltype_of_exception_type) + return exc_data.exc_type def rpyexc_fetch_value(): return exc_data.exc_value def rpyexc_clear(): - lloperation.llop.setfield_exc_type(lltype.Void, null_type) + exc_data.exc_type = null_type exc_data.exc_value = null_value def rpyexc_raise(etype, evalue): @@ -92,12 +90,12 @@ # us to see at least part of the traceback for them. ll_assert(etype != assertion_error_ll_exc_type, "AssertionError") ll_assert(etype != n_i_error_ll_exc_type, "NotImplementedError") - lloperation.llop.setfield_exc_type(lltype.Void, etype) + exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_start_traceback(lltype.Void, etype) def rpyexc_reraise(etype, evalue): - lloperation.llop.setfield_exc_type(lltype.Void, etype) + exc_data.exc_type = etype exc_data.exc_value = evalue lloperation.llop.debug_reraise_traceback(lltype.Void, etype) @@ -108,8 +106,7 @@ def rpyexc_restore_exception(evalue): if evalue: - lloperation.llop.setfield_exc_type(lltype.Void, - ll_inst_type(evalue)) + exc_data.exc_type = ll_inst_type(evalue) exc_data.exc_value = evalue self.rpyexc_occured_ptr = self.build_func( @@ -146,15 +143,15 @@ lltype.Void, jitcallkind='rpyexc_raise') # for the JIT - #self.rpyexc_fetch_exception_ptr = self.build_func( - # "RPyFetchException", - # rpyexc_fetch_exception, - # [], self.lltype_of_exception_value) + self.rpyexc_fetch_exception_ptr = self.build_func( + "RPyFetchException", + rpyexc_fetch_exception, + [], self.lltype_of_exception_value) - #self.rpyexc_restore_exception_ptr = self.build_func( - # "RPyRestoreException", - # self.noinline(rpyexc_restore_exception), - # 
[self.lltype_of_exception_value], lltype.Void) + self.rpyexc_restore_exception_ptr = self.build_func( + "RPyRestoreException", + self.noinline(rpyexc_restore_exception), + [self.lltype_of_exception_value], lltype.Void) self.build_extra_funcs() @@ -464,6 +461,7 @@ def setup_excdata(self): EXCDATA = lltype.Struct('ExcData', + ('exc_type', self.lltype_of_exception_type), ('exc_value', self.lltype_of_exception_value)) self.EXCDATA = EXCDATA @@ -484,17 +482,11 @@ return Constant(fn_ptr, lltype.Ptr(FUNC_TYPE)) def gen_getfield(self, name, llops): - if name == 'exc_type': - return llops.genop('getfield_exc_type', [], - resulttype = self.lltype_of_exception_type) c_name = inputconst(lltype.Void, name) return llops.genop('getfield', [self.cexcdata, c_name], resulttype = getattr(self.EXCDATA, name)) def gen_setfield(self, name, v_value, llops): - if name == 'exc_type': - llops.genop('setfield_exc_type', [v_value]) - return c_name = inputconst(lltype.Void, name) llops.genop('setfield', [self.cexcdata, c_name, v_value]) @@ -523,7 +515,6 @@ exc_data = self.exc_data_ptr def rpyexc_get_exception_addr(): - raise NotImplementedError return (llmemory.cast_ptr_to_adr(exc_data) + llmemory.offsetof(EXCDATA, 'exc_type')) From noreply at buildbot.pypy.org Sun May 25 21:14:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 May 2014 21:14:56 +0200 (CEST) Subject: [pypy-commit] pypy shadowstack-again: For comparison, the variant directly without r15. Message-ID: <20140525191456.6E46E1C0109@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: shadowstack-again Changeset: r71729:92e70fc3564f Date: 2014-05-25 21:12 +0200 http://bitbucket.org/pypy/pypy/changeset/92e70fc3564f/ Log: For comparison, the variant directly without r15. 
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -202,7 +202,7 @@ /*********************************/ #if defined(__GNUC__) && defined(__amd64__) -# define RPY_SHADOWSTACK_REG "r15" +//# define RPY_SHADOWSTACK_REG "r15" #endif struct rpy_shadowstack_s { void *s; }; From noreply at buildbot.pypy.org Sun May 25 21:51:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 25 May 2014 21:51:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Add comments saying that PyGILState_Ensure() is pretty bogus. Message-ID: <20140525195109.D07A51D2CF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71730:01d8c6a4682b Date: 2014-05-25 21:50 +0200 http://bitbucket.org/pypy/pypy/changeset/01d8c6a4682b/ Log: Add comments saying that PyGILState_Ensure() is pretty bogus. diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -208,6 +208,9 @@ @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): + # XXX XXX XXX THIS IS A VERY MINIMAL IMPLEMENTATION THAT WILL HAPPILY + # DEADLOCK IF CALLED TWICE ON THE SAME THREAD, OR CRASH IF CALLED IN A + # NEW THREAD. We should very carefully follow what CPython does instead. if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() @@ -215,6 +218,7 @@ @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): + # XXX XXX XXX We should very carefully follow what CPython does instead. 
if rffi.aroundstate.before: # Before external call is after running Python rffi.aroundstate.before() From noreply at buildbot.pypy.org Mon May 26 01:08:18 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Mon, 26 May 2014 01:08:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Raise ProcessorAutodetectError for unknown machine names Message-ID: <20140525230818.AC3FC1C02D4@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r71731:eacab5168076 Date: 2014-05-26 01:07 +0200 http://bitbucket.org/pypy/pypy/changeset/eacab5168076/ Log: Raise ProcessorAutodetectError for unknown machine names Previously this function was only used for the JIT, so it only had to support machines that the JIT supports. Now, it's used by rpython.rlib.rawstorage, and should raise an appropriate exception, rather than KeyError, on unknown machines. diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -63,7 +63,10 @@ 'AMD64': MODEL_X86, # win64 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, - }[mach] + }.get(mach) + + if result is None: + raise ProcessorAutodetectError, "unknown machine name %s" % mach # if result.startswith('x86'): if sys.maxint == 2**63-1: From noreply at buildbot.pypy.org Mon May 26 04:58:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 04:58:12 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: mention branch in whatsnew Message-ID: <20140526025812.A4B231C02D4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71732:f72cecd5c9ba Date: 2014-05-26 03:49 +0100 http://bitbucket.org/pypy/pypy/changeset/f72cecd5c9ba/ Log: mention branch in whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -11,3 +11,4 @@ .. branch: release-2.3.x +.. 
branch: unify-call-ops From noreply at buildbot.pypy.org Mon May 26 04:58:14 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 04:58:14 +0200 (CEST) Subject: [pypy-commit] pypy unify-call-ops: Close branch before merging. Message-ID: <20140526025814.0F9591C02D4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: unify-call-ops Changeset: r71733:a6ea52fb5ebd Date: 2014-05-26 03:50 +0100 http://bitbucket.org/pypy/pypy/changeset/a6ea52fb5ebd/ Log: Close branch before merging. From noreply at buildbot.pypy.org Mon May 26 04:58:15 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 04:58:15 +0200 (CEST) Subject: [pypy-commit] pypy default: merge branch unify-call-ops Message-ID: <20140526025815.9164A1C02D4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71734:03667db26477 Date: 2014-05-26 03:57 +0100 http://bitbucket.org/pypy/pypy/changeset/03667db26477/ Log: merge branch unify-call-ops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -11,3 +11,4 @@ .. branch: release-2.3.x +.. 
branch: unify-call-ops diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -18,7 +18,7 @@ from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import simple_args, complex_args +from rpython.annotator.argument import simple_args from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -103,8 +103,9 @@ self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): - self.consider_call_site_for_pbc(pbc, 'simple_call', - args_s, s_ImpossibleValue, None) + args = simple_args(args_s) + self.consider_call_site_for_pbc(pbc, args, + s_ImpossibleValue, None) self.emulated_pbc_calls = {} finally: self.leave() @@ -152,16 +153,16 @@ args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): s_result = binding(call_op.result, s_ImpossibleValue) - self.consider_call_site_for_pbc(s_callable, call_op.opname, args_s, + args = call_op.build_args(args_s) + self.consider_call_site_for_pbc(s_callable, args, s_result, call_op) - def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result, + def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) if not descs: return family = descs[0].getcallfamily() - args = self.build_args(opname, args_s) s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) @@ -562,12 +563,6 @@ assert self.annotator.binding(op.args[pos]) == s_type return op - def build_args(self, op, args_s): - if op == "simple_call": - return simple_args(args_s) - elif op == "call_args": - return complex_args(args_s) - def ondegenerated(self, what, s_value, where=None, called_from_graph=None): 
self.annotator.ondegenerated(what, s_value, where=where, called_from_graph=called_from_graph) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -14,6 +14,7 @@ SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc from rpython.annotator.model import SomeTuple +from rpython.annotator.argument import ArgumentsForTranslation from rpython.flowspace.specialcase import SPECIAL_CASES @@ -511,6 +512,9 @@ return sc(ctx, *args_w) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation(list(args_s)) + class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' @@ -529,6 +533,10 @@ "should not call %r with keyword arguments" % (fn,)) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation.fromshape(args_s[0].const, + list(args_s[1:])) + # Other functions that get directly translated to SpaceOperators func2op[type] = op.type diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -31,7 +31,7 @@ getrinputs(rtyper, graph), getrresult(rtyper, graph)) -def callparse(rtyper, graph, hop, opname, r_self=None): +def callparse(rtyper, graph, hop, r_self=None): """Parse the arguments of 'hop' when calling the given 'graph'. 
""" rinputs = getrinputs(rtyper, graph) @@ -43,6 +43,7 @@ else: start = 0 rinputs[0] = r_self + opname = hop.spaceop.opname if opname == "simple_call": arguments = ArgumentsForRtype(args_h(start)) elif opname == "call_args": diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -183,10 +183,10 @@ return self.convert_desc(funcdesc) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) def dispatcher(self, shape, index, argtypes, resulttype): key = shape, index, tuple(argtypes), resulttype @@ -224,9 +224,9 @@ c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) return c_ret - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -234,7 +234,7 @@ row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness vlist = [hop.inputarg(self, arg=0)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() v_dispatcher = self.dispatcher(shape, index, [v.concretetype for v in vlist[1:]], rresult.lowleveltype) @@ -353,11 +353,6 @@ v_func = r_class.getclsfield(v_cls, self.methodname, hop.llops) hop2 = self.add_instance_arg_to_hop(hop, call_args) - opname = 'simple_call' - if call_args: - opname = 'call_args' - hop2.forced_opname = opname - hop2.v_s_insertfirstarg(v_func, s_func) # insert 'function' if type(hop2.args_r[0]) is 
SmallFunctionSetPBCRepr and type(r_func) is FunctionsPBCRepr: diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -10,6 +10,7 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.tool.pairtype import pairtype from rpython.rtyper.rmodel import Repr from rpython.rtyper.rint import IntegerRepr @@ -36,8 +37,10 @@ rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args hop2.args_s = [self.s_tuple, s_index] hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() @@ -87,8 +90,10 @@ rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args hop2.args_s = [self.s_tuple, s_index] hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -1,4 +1,5 @@ from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.rtyper.error import TyperError from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -71,5 +72,7 @@ s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj hop2.s_result, hop2.r_result = 
s_new, r_new hop2.v_s_insertfirstarg(c_meth, s_meth) - hop2.forced_opname = 'simple_call' + spaceop = op.simple_call(*hop2.args_v) + spaceop.result = hop2.spaceop.result + hop2.spaceop = spaceop return hop2.dispatch() diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -300,14 +300,14 @@ return inputconst(typeOf(llfn), llfn) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -317,7 +317,7 @@ vfn = hop.inputarg(self, arg=0) vlist = [self.convert_to_concrete_llfn(vfn, shape, index, hop.llops)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() if isinstance(vlist[0], Constant): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -650,8 +650,6 @@ class HighLevelOp(object): - forced_opname = None - def __init__(self, rtyper, spaceop, exceptionlinks, llops): self.rtyper = rtyper self.spaceop = spaceop @@ -679,12 +677,11 @@ if type(value) is list: # grunt value = value[:] setattr(result, key, value) - result.forced_opname = self.forced_opname return result def dispatch(self): rtyper = self.rtyper - opname = self.forced_opname or self.spaceop.opname + opname = self.spaceop.opname translate_meth = getattr(rtyper, 'translate_op_' + opname, rtyper.default_translate_operation) return translate_meth(self) From 
noreply at buildbot.pypy.org Mon May 26 10:03:10 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 May 2014 10:03:10 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: use stmcb_get_card_base_itemsize instead of stmcb_index_to_byte_offset Message-ID: <20140526080310.604D51C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1245:d6fca1fcaadb Date: 2014-05-26 10:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/d6fca1fcaadb/ Log: use stmcb_get_card_base_itemsize instead of stmcb_index_to_byte_offset diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -43,7 +43,20 @@ n = (struct node_s*)obj; visit((object_t **)&n->next); } - +long stmcb_should_use_cards(struct object_s *obj) +{ + return 0; +} +void stmcb_get_card_base_itemsize( + struct object_s *obj, uintptr_t *base_offset, ssize_t *item_size) +{ + abort(); +} +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + abort(); +} void stmcb_commit_soon() {} static void expand_marker(char *base, uintptr_t odd_number, diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -558,7 +558,11 @@ /* Combine multiple marked cards and do a memcpy for them. 
We don't try yet to use page_copy() or otherwise take into account privatization of pages (except _has_private_page_in_range) */ - uintptr_t start = 0; + uintptr_t base_offset; + ssize_t item_size; + stmcb_get_card_base_itemsize(realobj, &base_offset, &item_size); + + uintptr_t start_card_index = -1; while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; uint8_t card_value = write_locks[card_lock_idx]; @@ -568,32 +572,40 @@ if (card_value == CARD_MARKED_OLD) { write_locks[card_lock_idx] = CARD_CLEAR; - if (start == 0) { /* first marked card */ - start = (uintptr_t)obj + stmcb_index_to_byte_offset( - realobj, get_card_index_to_index(card_index)); + if (start_card_index == -1) { /* first marked card */ + start_card_index = card_index; + /* start = (uintptr_t)obj + stmcb_index_to_byte_offset( */ + /* realobj, get_card_index_to_index(card_index)); */ } } - if (start /* something to copy */ + if (start_card_index != -1 /* something to copy */ && (card_value != CARD_MARKED_OLD /* found non-marked card */ || card_index == last_card_index)) { /* this is the last card */ /* do the copying: */ - uintptr_t copy_size; + uintptr_t start, copy_size; uintptr_t next_card_offset; - uintptr_t next_card = card_index; + uintptr_t start_card_offset; + uintptr_t next_card_index = card_index; if (card_value == CARD_MARKED_OLD) { /* card_index is the last card of the object, but we need to go one further to get the right offset */ - next_card++; + next_card_index++; } - next_card_offset = stmcb_index_to_byte_offset( - realobj, get_card_index_to_index(next_card)); + + start_card_offset = base_offset + + get_card_index_to_index(start_card_index) * item_size; + + next_card_offset = base_offset + + get_card_index_to_index(next_card_index) * item_size; if (next_card_offset > obj_size) next_card_offset = obj_size; - copy_size = next_card_offset - (start - (uintptr_t)obj); + start = (uintptr_t)obj + start_card_offset; + copy_size = next_card_offset - 
start_card_offset; + OPT_ASSERT(copy_size > 0); /* dprintf(("copy %lu bytes\n", copy_size)); */ @@ -616,8 +628,7 @@ memcpy(dst, src, copy_size); } - copy_size = 0; - start = 0; + start_card_index = -1; } card_index++; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -250,13 +250,20 @@ */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +/* called to determine if we should use cards for this object. + (makes most sense for big arrays with references) */ +extern long stmcb_should_use_cards(struct object_s *); +/* a special trace-callback that is only called for the marked + ranges of indices (using stm_write_card(o, index)) */ extern void stmcb_trace_cards(struct object_s *, void (object_t **), uintptr_t start, uintptr_t stop); -/* needs to work with index > any valid index (can just return - object's size then) */ -extern uintptr_t stmcb_index_to_byte_offset(struct object_s *, - uintptr_t index); -extern long stmcb_should_use_cards(struct object_s *); +/* this function will be called on objects that support cards + (stmcb_should_use_cards() returned True). 
It returns the + base_offset (in bytes) inside the object from where the + indices start, and item_size (in bytes) for the size of + one item */ +extern void stmcb_get_card_base_itemsize( + struct object_s *, uintptr_t *base_offset, ssize_t *item_size); extern void stmcb_commit_soon(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -317,15 +317,16 @@ } } -uintptr_t stmcb_index_to_byte_offset(struct object_s *obj, uintptr_t index) +void stmcb_get_card_base_itemsize( + struct object_s *obj, uintptr_t *base_offset, ssize_t *item_size) { struct myobj_s *myobj = (struct myobj_s*)obj; if (myobj->type_id < 421420) { abort(); // works, but we want to test otherwise /* basic case: index=byteoffset */ - return index; } - return sizeof(struct myobj_s) + index * sizeof(object_t*); + *base_offset = sizeof(struct myobj_s); + *item_size = sizeof(object_t *); } long stmcb_should_use_cards(struct object_s *obj) From noreply at buildbot.pypy.org Mon May 26 15:50:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 May 2014 15:50:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some tool to measure residual mem usage over time Message-ID: <20140526135034.4BE491D2CE5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5266:f530e6b6abe8 Date: 2014-05-26 15:51 +0200 http://bitbucket.org/pypy/extradoc/changeset/f530e6b6abe8/ Log: add some tool to measure residual mem usage over time diff --git a/talk/dls2014/misc/measure_memusage.sh b/talk/dls2014/misc/measure_memusage.sh new file mode 100755 --- /dev/null +++ b/talk/dls2014/misc/measure_memusage.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# invoke like +# ./measure_memusage.sh command to execute +# it uses 'top' and records its output in a tempfile (not deleted +# afterwards). The interval for measurements is 1s. The RES/VIRT/SHR +# should be in KiB. + +TMPFILE=$(tempfile) + +"$@" & +PID=$! 
+ +top -d1 -b -i -p $PID > $TMPFILE & +MEMPID=$! + +wait $PID +kill $MEMPID + +cat $TMPFILE | egrep "(PID)|(^\W*$PID)" +echo "RESULTS in $TMPFILE" +#pypy-c ~/pypy/benchmarks/multithread/multithread-richards.py 100 4 2>/dev/null & top -d1 -b -i -p $(pidof pypy-c) > output From noreply at buildbot.pypy.org Mon May 26 16:48:29 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 May 2014 16:48:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add todo Message-ID: <20140526144829.460C81D2939@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5267:9b0ba0d80995 Date: 2014-05-26 16:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/9b0ba0d80995/ Log: add todo diff --git a/talk/dls2014/paper/TODO b/talk/dls2014/paper/TODO new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/TODO @@ -0,0 +1,14 @@ +* measure things +** memory: residual & GC-numbers +** overhead: breakdown (maybe also with multiple threads) +** scaling: microbenchmark +** real-world: our benchmarks on all working interpreters (& Jython) +* explain better the GC-STM integration and its benefits + +Finalize +======== + +* check "claims" in discussion.txt +* re-check the benchmark results (any unexplained weirdness?) 
+* find references / citations / related work +* check abstract and conclusion, acks & authors From noreply at buildbot.pypy.org Mon May 26 16:59:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 26 May 2014 16:59:15 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add example plot Message-ID: <20140526145915.C33E21D2939@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5268:311db285ffe8 Date: 2014-05-26 17:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/311db285ffe8/ Log: add example plot diff --git a/talk/dls2014/paper/plots/plot_example.py b/talk/dls2014/paper/plots/plot_example.py new file mode 100755 --- /dev/null +++ b/talk/dls2014/paper/plots/plot_example.py @@ -0,0 +1,103 @@ +#!/usr/bin/python + +########################################################## +""" TODO: print thread-descriptor info on commit/abort """ +########################################################## + +import matplotlib +import os +import sys +matplotlib.use('gtkagg') + +from matplotlib import rc +#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) +## for Palatino and other serif fonts use: +rc('font',**{'family':'serif','serif':['Palatino']}) +rc('text', usetex=True) + +args = None +import matplotlib.pyplot as plt +# import pprint - slow as hell + + +ys = {10:[3.95, 7.99, 7.55, 5.39, 6.37, 7.46, 5.26], + 100:[7.16, 7.35, 8.82, 9.65, 7.91, 7.94, 7.56], + 1000:[9.81, 13.38, 14.25, 14.90, 13.26, 14.51, 14.69], + 10000:[8.83, 14.51, 14.35, 14.4, 11.96, 13.94, 14.94], + 100000:[8.23, 14.59, 14.48, 13.81, 13.64, 13.75, 13.72], + 1000000:[3.64, 4.63, 8.25, 3.63, 3.64, 6.46, 6.48], + 10000000:[3.55, 3.59, 3.55, 3.55, 3.56, 3.55, 3.55], + } + + +def plot_tps(ax): + import numpy as np + x = [] + y = [] + yerr = [] + + for k in sorted(ys.keys()): + v = ys[k] + x.append(k) + y.append(np.mean(v)) + yerr.append(np.std(v)) + + ax.errorbar(x, y, yerr=yerr) + + +def main(): + global fig + + print "Draw..." 
+ fig = plt.figure() + + ax = fig.add_subplot(111) + + plot_tps(ax) + + ax.set_xscale('log') + ax.set_ylabel("Requests per Second (TPS)") + ax.set_xlabel("reads\_limit") + ax.set_xlim(5, 20000000) + + #axs[0].set_ylim(0, len(x)) + #ax.set_yticks([r+0.5 for r in range(len(logs))]) + #ax.set_yticklabels(range(1, len(logs)+1)) + #axs[0].set_xticks([]) + print "Drawn." + + # def label_format(x, pos): + # return "%.2f" % (abs((x - left) * 1e-6), ) + # major_formatter = matplotlib.ticker.FuncFormatter(label_format) + # axs[0].xaxis.set_major_formatter(major_formatter) + + #legend = ax.legend() + + plt.draw() + file_name = "setcheck.pdf" + plt.savefig(file_name, format='pdf', + # bbox_extra_artists=(legend,), + bbox_inches='tight', pad_inches=0) + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description='Plot stm log files') + parser.add_argument('--figure-size', default='6x4', + help='set figure size in inches: format=6x4') + parser.add_argument('--font-size', default='10.0', + help='set font size in pts: 10.0') + parser.add_argument('--png-dpi', default='300', + help='set dpi of png output: 300') + + + args = parser.parse_args() + matplotlib.rcParams.update( + {'figure.figsize': tuple(map(int, args.figure_size.split('x'))), + 'font.size': float(args.font_size), + 'savefig.dpi': int(args.png_dpi), + }) + + + main() From noreply at buildbot.pypy.org Mon May 26 19:13:25 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 19:13:25 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20140526171325.432421C01E5@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71735:ef8c0505fc21 Date: 2014-05-26 18:12 +0100 http://bitbucket.org/pypy/pypy/changeset/ef8c0505fc21/ Log: fix translation diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -91,7 +91,7 @@ s_index = 
rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.result + spaceop.result = hop.spaceop.result hop2.spaceop = spaceop hop2.args_v = spaceop.args hop2.args_s = [self.s_tuple, s_index] From noreply at buildbot.pypy.org Mon May 26 19:59:03 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 19:59:03 +0200 (CEST) Subject: [pypy-commit] pypy default: create class SomeNone Message-ID: <20140526175903.89BF61C3324@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71736:3bd72bcbe792 Date: 2014-05-26 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/3bd72bcbe792/ Log: create class SomeNone diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -505,6 +505,28 @@ else: return kt.__name__ +class SomeNone(SomePBC): + can_be_None = True + subset_of = None + knowntype = type(None) + const = None + + def __init__(self): + pass + + def isNone(self): + return True + + @property + def descriptions(self): + return set() + + def is_constant(self): + return True + + def is_immutable_constant(self): + return True + class SomeConstantType(SomePBC): can_be_None = False subset_of = None @@ -557,7 +579,7 @@ return False -s_None = SomePBC([], can_be_None=True) +s_None = SomeNone() s_Bool = SomeBool() s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() From noreply at buildbot.pypy.org Mon May 26 22:11:41 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 26 May 2014 22:11:41 +0200 (CEST) Subject: [pypy-commit] pypy default: start splitting SomePBC and SomeNone Message-ID: <20140526201141.6443D1C35CC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71737:857289bf8ff9 Date: 2014-05-26 21:11 +0100 http://bitbucket.org/pypy/pypy/changeset/857289bf8ff9/ Log: start splitting SomePBC and SomeNone diff --git a/rpython/annotator/binaryop.py 
b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -9,8 +9,8 @@ SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, - SomeBuiltinMethod, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeSingleFloat, + SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, + SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -773,19 +773,13 @@ glob = globals() loc = locals() source = py.code.Source(""" - class __extend__(pairtype(%(classname)s, SomePBC)): - def union((obj, pbc)): - if pbc.isNone(): - return %(classname)s(%(constructor_args)s) - else: - raise UnionError(pbc, obj) + class __extend__(pairtype(%(classname)s, SomeNone)): + def union((obj, none)): + return %(classname)s(%(constructor_args)s) - class __extend__(pairtype(SomePBC, %(classname)s)): - def union((pbc, obj)): - if pbc.isNone(): - return %(classname)s(%(constructor_args)s) - else: - raise UnionError(pbc, obj) + class __extend__(pairtype(SomeNone, %(classname)s)): + def union((none, obj)): + return %(classname)s(%(constructor_args)s) """ % loc) exec source.compile() in glob diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -1,8 +1,9 @@ """ Type inference for user-defined classes. 
""" -from rpython.annotator.model import SomePBC, s_ImpossibleValue, unionof -from rpython.annotator.model import SomeInteger, SomeTuple, SomeString, AnnotatorError +from rpython.annotator.model import ( + SomePBC, s_ImpossibleValue, unionof, s_None, SomeInteger, SomeTuple, + SomeString, AnnotatorError) from rpython.annotator import description @@ -351,8 +352,10 @@ if uplookup is not None: d.append(updesc.bind_self(self, flags)) - if d or pbc.can_be_None: + if d: return SomePBC(d, can_be_None=pbc.can_be_None) + elif pbc.can_be_None: + return s_None else: return s_ImpossibleValue diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -424,36 +424,32 @@ immutable = True def __init__(self, descriptions, can_be_None=False, subset_of=None): + assert descriptions # descriptions is a set of Desc instances descriptions = set(descriptions) self.descriptions = descriptions self.can_be_None = can_be_None self.subset_of = subset_of self.simplify() - if self.isNone(): - self.knowntype = type(None) - self.const = None - else: - knowntype = reduce(commonbase, - [x.knowntype for x in descriptions]) - if knowntype == type(Exception): - knowntype = type - if knowntype != object: - self.knowntype = knowntype - if len(descriptions) == 1 and not can_be_None: - # hack for the convenience of direct callers to SomePBC(): - # only if there is a single object in descriptions - desc, = descriptions - if desc.pyobj is not None: - self.const = desc.pyobj - elif len(descriptions) > 1: - from rpython.annotator.description import ClassDesc - if self.getKind() is ClassDesc: - # a PBC of several classes: enforce them all to be - # built, without support for specialization. 
See - # rpython/test/test_rpbc.test_pbc_of_classes_not_all_used - for desc in descriptions: - desc.getuniqueclassdef() + knowntype = reduce(commonbase, [x.knowntype for x in descriptions]) + if knowntype == type(Exception): + knowntype = type + if knowntype != object: + self.knowntype = knowntype + if len(descriptions) == 1 and not can_be_None: + # hack for the convenience of direct callers to SomePBC(): + # only if there is a single object in descriptions + desc, = descriptions + if desc.pyobj is not None: + self.const = desc.pyobj + elif len(descriptions) > 1: + from rpython.annotator.description import ClassDesc + if self.getKind() is ClassDesc: + # a PBC of several classes: enforce them all to be + # built, without support for specialization. See + # rpython/test/test_rpbc.test_pbc_of_classes_not_all_used + for desc in descriptions: + desc.getuniqueclassdef() def any_description(self): return iter(self.descriptions).next() @@ -466,32 +462,24 @@ kinds.add(x.__class__) if len(kinds) > 1: raise AnnotatorError("mixing several kinds of PBCs: %r" % kinds) - if not kinds: - raise ValueError("no 'kind' on the 'None' PBC") return kinds.pop() def simplify(self): - if self.descriptions: - # We check that the set only contains a single kind of Desc instance - kind = self.getKind() - # then we remove unnecessary entries in self.descriptions: - # some MethodDescs can be 'shadowed' by others - if len(self.descriptions) > 1: - kind.simplify_desc_set(self.descriptions) - else: - assert self.can_be_None, "use s_ImpossibleValue" + # We check that the set only contains a single kind of Desc instance + kind = self.getKind() + # then we remove unnecessary entries in self.descriptions: + # some MethodDescs can be 'shadowed' by others + if len(self.descriptions) > 1: + kind.simplify_desc_set(self.descriptions) def isNone(self): - return len(self.descriptions) == 0 + return False def can_be_none(self): return self.can_be_None def nonnoneify(self): - if self.isNone(): - return 
s_ImpossibleValue - else: - return SomePBC(self.descriptions, can_be_None=False) + return SomePBC(self.descriptions, can_be_None=False) def fmt_descriptions(self, pbis): if hasattr(self, 'const'): @@ -527,6 +515,10 @@ def is_immutable_constant(self): return True + def nonnoneify(self): + return s_ImpossibleValue + + class SomeConstantType(SomePBC): can_be_None = False subset_of = None diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -8,7 +8,7 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, - SomeFloat, SomeIterator, SomePBC, SomeType, s_ImpossibleValue, + SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue @@ -765,6 +765,10 @@ # This should probably never happen raise AnnotatorError("Cannot call len on a pbc") +class __extend__(SomeNone): + def bind_callables_under(self, classdef, name): + return self + #_________________________________________ # weakrefs diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -1,6 +1,6 @@ from rpython.annotator import model as annmodel from rpython.tool.pairtype import pairtype -from rpython.annotator.binaryop import _make_none_union, SomePBC # SomePBC needed by _make_none_union +from rpython.annotator.binaryop import _make_none_union, SomeNone # SomeNone needed by _make_none_union from rpython.annotator.bookkeeper import getbookkeeper from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.annlowlevel import cachedtype From noreply at 
buildbot.pypy.org Tue May 27 01:52:18 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Tue, 27 May 2014 01:52:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Replace the VFP assert in detect_cpu with a ProcessorAutodetectError, so we can use detect_cpu on ARMv4 Message-ID: <20140526235218.5B2241C01E5@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r71738:e9af47acbea2 Date: 2014-05-27 01:51 +0200 http://bitbucket.org/pypy/pypy/changeset/e9af47acbea2/ Log: Replace the VFP assert in detect_cpu with a ProcessorAutodetectError, so we can use detect_cpu on ARMv4 diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -81,7 +81,9 @@ # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float - assert detect_float(), 'the JIT-compiler requires a vfp unit' + if not detect_float(): + raise ProcessorAutodetectError( + 'the JIT-compiler requires a vfp unit') # return result From noreply at buildbot.pypy.org Tue May 27 04:15:04 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 27 May 2014 04:15:04 +0200 (CEST) Subject: [pypy-commit] pypy default: kill some uses of pbc.isNone() Message-ID: <20140527021504.E9F671C3324@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71739:181bc0186716 Date: 2014-05-27 03:13 +0100 http://bitbucket.org/pypy/pypy/changeset/181bc0186716/ Log: kill some uses of pbc.isNone() diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -791,28 +791,35 @@ _make_none_union('SomeDict', 'obj.dictdef') _make_none_union('SomeWeakRef', 'obj.classdef') -# getitem on SomePBCs, in particular None fails class __extend__(pairtype(SomePBC, SomeObject)): def getitem((pbc, o)): - if not pbc.isNone(): - raise AnnotatorError("getitem on %r" % pbc) + raise AnnotatorError("getitem on %r" 
% pbc) + + def setitem((pbc, o), s_value): + raise AnnotatorError("setitem on %r" % pbc) + +class __extend__(pairtype(SomeNone, SomeObject)): + def getitem((none, o)): return s_ImpossibleValue - def setitem((pbc, o), s_value): - if not pbc.isNone(): - raise AnnotatorError("setitem on %r" % pbc) + def setitem((none, o), s_value): + return None class __extend__(pairtype(SomePBC, SomeString)): def add((pbc, o)): - if not pbc.isNone(): - raise AnnotatorError('add on %r' % pbc) + raise AnnotatorError('add on %r' % pbc) + +class __extend__(pairtype(SomeNone, SomeString)): + def add((none, o)): return s_ImpossibleValue class __extend__(pairtype(SomeString, SomePBC)): def add((o, pbc)): - if not pbc.isNone(): - raise AnnotatorError('add on %r' % pbc) + raise AnnotatorError('add on %r' % pbc) + +class __extend__(pairtype(SomeString, SomeNone)): + def add((o, none)): return s_ImpossibleValue #_________________________________________ diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -2,8 +2,8 @@ Type inference for user-defined classes. 
""" from rpython.annotator.model import ( - SomePBC, s_ImpossibleValue, unionof, s_None, SomeInteger, SomeTuple, - SomeString, AnnotatorError) + SomePBC, SomeNone, s_ImpossibleValue, unionof, s_None, SomeInteger, + SomeTuple, SomeString, AnnotatorError) from rpython.annotator import description @@ -104,10 +104,10 @@ self.bookkeeper.annotator.reflowfromposition(position) # check for method demotion and after-the-fact method additions - if isinstance(s_newvalue, SomePBC): + if (isinstance(s_newvalue, SomePBC) and + not isinstance(s_newvalue, SomeNone)): attr = self.name - if (not s_newvalue.isNone() and - s_newvalue.getKind() == description.MethodDesc): + if s_newvalue.getKind() == description.MethodDesc: # is method if homedef.classdesc.read_attribute(attr, None) is None: if not homedef.check_missing_attribute_update(attr): From noreply at buildbot.pypy.org Tue May 27 05:02:23 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 27 May 2014 05:02:23 +0200 (CEST) Subject: [pypy-commit] pypy default: kill pbc.isNone() Message-ID: <20140527030224.037901C1017@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71740:b13ff9f02cb0 Date: 2014-05-27 04:01 +0100 http://bitbucket.org/pypy/pypy/changeset/b13ff9f02cb0/ Log: kill pbc.isNone() diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -472,9 +472,6 @@ if len(self.descriptions) > 1: kind.simplify_desc_set(self.descriptions) - def isNone(self): - return False - def can_be_none(self): return self.can_be_None @@ -502,9 +499,6 @@ def __init__(self): pass - def isNone(self): - return True - @property def descriptions(self): return set() diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -739,8 +739,7 @@ getattr.can_only_throw = [] def setattr(self, s_attr, s_value): - if not self.isNone(): - raise AnnotatorError("Cannot modify 
attribute of a pre-built constant") + raise AnnotatorError("Cannot modify attribute of a pre-built constant") def call(self, args): bookkeeper = getbookkeeper() @@ -751,24 +750,27 @@ return SomePBC(d, can_be_None=self.can_be_None) def bool_behavior(self, s): - if self.isNone(): - s.const = False - elif not self.can_be_None: + if not self.can_be_None: s.const = True def len(self): - if self.isNone(): - # this None could later be generalized into an empty list, - # whose length is the constant 0; so let's tentatively answer 0. - return immutablevalue(0) - else: - # This should probably never happen - raise AnnotatorError("Cannot call len on a pbc") + raise AnnotatorError("Cannot call len on a pbc") class __extend__(SomeNone): def bind_callables_under(self, classdef, name): return self + def setattr(self, s_attr, s_value): + return None + + def bool_behavior(self, s): + s.const = False + + def len(self): + # XXX: this None could later be generalized into an empty list, + # whose length is the constant 0; so let's tentatively answer 0. 
+ return immutablevalue(0) + #_________________________________________ # weakrefs diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -154,11 +154,12 @@ # special-casing for methods: # if s_value is SomePBC([MethodDescs...]) # return a PBC representing the underlying functions - if isinstance(s_value, annmodel.SomePBC): - if not s_value.isNone() and s_value.getKind() == description.MethodDesc: - s_value = self.classdef.lookup_filter(s_value) - funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions] - return annmodel.SomePBC(funcdescs) + if (isinstance(s_value, annmodel.SomePBC) and + not isinstance(s_value, annmodel.SomeNone) and + s_value.getKind() == description.MethodDesc): + s_value = self.classdef.lookup_filter(s_value) + funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions] + return annmodel.SomePBC(funcdescs) return None # not a method def get_ll_eq_function(self): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -25,8 +25,6 @@ from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, MethodOfFrozenPBCRepr) - if self.isNone(): - return none_frozen_pbc_repr kind = self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -63,6 +61,13 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t +class __extend__(annmodel.SomeNone): + def rtyper_makerepr(self, rtyper): + return none_frozen_pbc_repr + + def rtyper_makekey(self): + return self.__class__, + # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -808,9 +813,6 @@ def __init__(self, rtyper, s_pbc): self.rtyper = rtyper self.s_pbc = s_pbc - if s_pbc.isNone(): - raise TyperError("unsupported: variable of type " - "bound-method-object or None") mdescs = list(s_pbc.descriptions) methodname = 
mdescs[0].name classdef = mdescs[0].selfclassdef diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -61,7 +61,7 @@ def ismeth(s_val): if not isinstance(s_val, annmodel.SomePBC): return False - if s_val.isNone(): + if isinstance(s_val, annmodel.SomeNone): return False return s_val.getKind() is MethodDesc bk = translator.annotator.bookkeeper From noreply at buildbot.pypy.org Tue May 27 11:04:36 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 11:04:36 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: talk a bit about jit-integration Message-ID: <20140527090437.05B011C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5269:9af8404ebf69 Date: 2014-05-27 09:47 +0200 http://bitbucket.org/pypy/extradoc/changeset/9af8404ebf69/ Log: talk a bit about jit-integration diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -937,6 +937,50 @@ \section{Evaluation} +We evaluate our system in a Python interpreter called +PyPy\footnote{www.pypy.org}. PyPy is an implementation of an +interpreter for the Python language. It has a special focus on speed, +as it provides a just-in-time (JIT) compiler to speed up applications +running on top of it. For comparison, we also do evaluation on other +Python interpreters: +\begin{description} +\item[CPython] is the reference implementation of the Python + language. It is the most widely used interpreter for this language. + The implementation uses a GIL for synchronisation in multi-threaded + execution and it does not feature a JIT compiler. +\item[Jython] is an implementation of Python on top of the Java Virtual + Machine (JVM). Instead of a GIL, this interpreter uses fine-grained + locking for synchronisation. This enables true parallelism when + executing code on multiple threads. 
In addition, its integration + with the JVM provides it with a JIT compiler for faster execution. +\end{description} + +Here, we will not go into detail about the integration of our STM +system with PyPy's JIT. In fact, we will disable it for all benchmarks +except those in section \ref{sec:real-world-bench}. We would like to +regard it as a simple performance enhancement, but that is not what +happens in reality. First, since the JIT is a tracing +JIT\remi{explain?} running in multiple threads, it may compile +different things in each run because of the non-deterministic +thread-scheduling of the operating system (OS). Second, it is able to +remove some allocations in some cases. Because compilation is already +non-deterministic, so is this allocation-removal. And third, we did +not have enough time to optimise integration with STM so that the JIT +exposes the overhead of STM more by speeding up all the rest. + +Overall, we believe that disabling it on all benchmarks except the +real-world benchmarks in section \ref{sec:real-world-bench} is better +because we can minimise non-determinism. We also do not want to depend +on the capabilities of the JIT in these experiments. + +% benchmarks with: pypy-c--Ojit-d1454093dd48+-14-05-26-17:16 +% that's with stmgc 70c403598485 + +% Sometimes with JIT, sometimes without. +% For scaling & memory w/o jit, since the jit can optimize away +% many allocations and exposes the overhead more. 
+ + \subsection{Memory Requirements} \begin{itemize} @@ -963,7 +1007,7 @@ maybe some simple micro benchmarks with adaptable conflict rate -\subsection{Real-World Benchmarks} +\subsection{Real-World Benchmarks\label{sec:real-world-bench}} more real benchmarks comparing multiple implementations: \begin{itemize}[noitemsep] From noreply at buildbot.pypy.org Tue May 27 11:04:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 11:04:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add first plot about memory usage Message-ID: <20140527090438.664E61C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5270:7dd50f571cae Date: 2014-05-27 10:58 +0200 http://bitbucket.org/pypy/extradoc/changeset/7dd50f571cae/ Log: add first plot about memory usage diff --git a/talk/dls2014/paper/plots/plot_richards_mem.py b/talk/dls2014/paper/plots/plot_richards_mem.py new file mode 100755 --- /dev/null +++ b/talk/dls2014/paper/plots/plot_richards_mem.py @@ -0,0 +1,109 @@ +#!/usr/bin/python + +# obtained log-file with +# pypy-c --jit off ~/pypy/benchmarks/multithread/multithread-richards.py 60 4 2>richards_mem.log +# rss using measure_memusage.sh + + + +import matplotlib +import os +import sys +matplotlib.use('gtkagg') + +from matplotlib import rc +#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) +## for Palatino and other serif fonts use: +rc('font',**{'family':'serif','serif':['Palatino']}) +rc('text', usetex=True) + +args = None +import matplotlib.pyplot as plt +# import pprint - slow as hell + +with open('richards_mem.log') as f: + xs = [] + y1s = [] + y2s = [] + first_time = None + for line in f.readlines(): + line = line.strip().strip("{").strip("}") + time, mems = line.split(":") + if not first_time: + first_time = float(time) + xs.append(float(time) - first_time) + real_mem, max_rss = mems.split("/") + y1s.append(int(real_mem) / 1024. 
/ 1024) + +x2s = range(12) +y2s = [152304, 180060, 180428, + 180448, 180460, 180696, + 180124, 180552, 180584, + 180588, 180544, 180252] +y2s = map(lambda x: x / 1024., y2s) + + +def plot_mems(ax): + ax.plot(xs, y1s, '-+', label="Memory") + ax.plot(x2s, y2s, '-x', label="Resident Set Size (RSS)") + + +def main(): + global fig + + print "Draw..." + fig = plt.figure() + + ax = fig.add_subplot(111) + + plot_mems(ax) + + ax.set_ylabel("Memory [MiB]") + ax.set_xlabel("Runtime [s]") + ax.set_xlim(-0.5, 11.5) + ax.set_ylim(0, 200) + + #axs[0].set_ylim(0, len(x)) + #ax.set_yticks([r+0.5 for r in range(len(logs))]) + #ax.set_yticklabels(range(1, len(logs)+1)) + #axs[0].set_xticks([]) + + # def label_format(x, pos): + # return "%.2f" % (abs((x - left) * 1e-6), ) + # major_formatter = matplotlib.ticker.FuncFormatter(label_format) + # axs[0].xaxis.set_major_formatter(major_formatter) + + legend = ax.legend(loc=5) + ax.set_title("Memory Usage in Richards") + + plt.draw() + #plt.show() + print "Drawn." 
+ + file_name = "richards_mem.pdf" + plt.savefig(file_name, format='pdf', + bbox_extra_artists=(legend,), + bbox_inches='tight', pad_inches=0) + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description='Plot stm log files') + parser.add_argument('--figure-size', default='6x4', + help='set figure size in inches: format=6x4') + parser.add_argument('--font-size', default='10.0', + help='set font size in pts: 10.0') + parser.add_argument('--png-dpi', default='300', + help='set dpi of png output: 300') + + + args = parser.parse_args() + matplotlib.rcParams.update( + {'figure.figsize': tuple(map(int, args.figure_size.split('x'))), + 'font.size': float(args.font_size), + 'savefig.dpi': int(args.png_dpi), + }) + + + main() diff --git a/talk/dls2014/paper/plots/richards_mem.pdf b/talk/dls2014/paper/plots/richards_mem.pdf new file mode 100644 index 0000000000000000000000000000000000000000..be2b698520270732815dd7b2b21107e8807d9774 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue May 27 11:04:39 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 11:04:39 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add plot to paper Message-ID: <20140527090439.B6BB31C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5271:600522f52bfd Date: 2014-05-27 11:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/600522f52bfd/ Log: add plot to paper diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -991,6 +991,13 @@ maybe some memory usage graph over time +\begin{figure}[h] + \centering + \includegraphics[width=1\columnwidth]{plots/richards_mem.pdf} + \caption{Actual memory managed by the GC and resident set size + over time in Richards benchmark\label{fig:richards_mem}} +\end{figure} + \subsection{Overhead Breakdown} diff --git a/talk/dls2014/paper/plots/plot_richards_mem.py 
b/talk/dls2014/paper/plots/plot_richards_mem.py --- a/talk/dls2014/paper/plots/plot_richards_mem.py +++ b/talk/dls2014/paper/plots/plot_richards_mem.py @@ -44,7 +44,8 @@ def plot_mems(ax): - ax.plot(xs, y1s, '-+', label="Memory") + ax.plot(xs, y1s, '-o', label="GC managed memory", + ms=2) ax.plot(x2s, y2s, '-x', label="Resident Set Size (RSS)") @@ -74,7 +75,7 @@ # axs[0].xaxis.set_major_formatter(major_formatter) legend = ax.legend(loc=5) - ax.set_title("Memory Usage in Richards") + #ax.set_title("Memory Usage in Richards") plt.draw() #plt.show() diff --git a/talk/dls2014/paper/plots/richards_mem.pdf b/talk/dls2014/paper/plots/richards_mem.pdf index be2b698520270732815dd7b2b21107e8807d9774..138f5784e44512fef2911dbf52122be8a88d49bd GIT binary patch [cut] From noreply at buildbot.pypy.org Tue May 27 11:05:39 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 11:05:39 +0200 (CEST) Subject: [pypy-commit] stmgc instrumented: extra branch for some instrumented versions used for the paper Message-ID: <20140527090539.E042E1C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: instrumented Changeset: r1246:b6b8492892d5 Date: 2014-05-27 11:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/b6b8492892d5/ Log: extra branch for some instrumented versions used for the paper diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -79,8 +79,31 @@ pages_setup_readmarkers_for_nursery(); } +#define MEASURE_MEM +#include +#include +#include + +pthread_t m_thread; +static volatile long kill_measurement = 0; +void* measurement_thread(void *arg) +{ + while (!kill_measurement) { + usleep(100000); /* 100ms */ + double time = get_stm_time(); +#ifdef MEASURE_MEM + struct rusage usage; + getrusage(RUSAGE_SELF, &usage); + fprintf(stderr, "{%f:%ld/%ld}\n", time, + (long)pages_ctl.total_allocated, usage.ru_maxrss*1024); +#endif + } + return NULL; +} + void stm_setup(void) { + pthread_create(&m_thread, NULL, measurement_thread, NULL); 
/* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); assert(4096 <= ((uintptr_t)STM_SEGMENT)); @@ -152,6 +175,9 @@ need to call it. */ assert(!_has_mutex()); + kill_measurement = 1; + pthread_join(m_thread, NULL); + long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); From noreply at buildbot.pypy.org Tue May 27 11:35:14 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 11:35:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add test Message-ID: <20140527093514.86F7C1C142A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5272:a441daa69191 Date: 2014-05-27 11:36 +0200 http://bitbucket.org/pypy/extradoc/changeset/a441daa69191/ Log: add test diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -983,6 +983,25 @@ \subsection{Memory Requirements} +There are several sources of extra memory requirements in our +STM system. First, we need to keep track of the state an object +is in. We do this using flags and an overflow number. Both currently +fit in an additional header of 4~bytes per object. + +Second, there are areas in memory private to each segment (see +section \ref{sub:Setup}). The Nursery for example is 4~MiB in +size. Additionally, the area used for the read markers is lazily +mapped by the kernel and only needed for objects in the old object +space. Thus, it can be at most the total size of memory used by +old objects divided by 16. + +Third, since we do copy on write, if all objects were to be +written to at the same time, all pages would need to be privatised +for all objects in the old object space. In that case we would +need the total amount of memory required by old objects multiplied +by $N+1$ (incl. the sharing segment). 
+\remi{maybe collect some statistics about pages privatised per segment} + \begin{itemize} \item stm\_flags per object \item read markers and other sections From noreply at buildbot.pypy.org Tue May 27 13:44:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 13:44:16 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: text Message-ID: <20140527114416.C3AF41C0109@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5273:fa139877cac7 Date: 2014-05-27 13:45 +0200 http://bitbucket.org/pypy/extradoc/changeset/fa139877cac7/ Log: text diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -938,21 +938,22 @@ \section{Evaluation} We evaluate our system in a Python interpreter called -PyPy\footnote{www.pypy.org}. PyPy is an implementation of an +PyPy\footnote{www.pypy.org} (version 2.3). PyPy is an implementation of an interpreter for the Python language. It has a special focus on speed, as it provides a just-in-time (JIT) compiler to speed up applications -running on top of it. For comparison, we also do evaluation on other -Python interpreters: +running on top of it. For comparison, we compare between normal PyPy +using a GIL and a PyPy with STM, as well as between other Python interpreters: \begin{description} -\item[CPython] is the reference implementation of the Python +\item[CPython] (version 2.7.6) is the reference implementation of the Python language. It is the most widely used interpreter for this language. The implementation uses a GIL for synchronisation in multi-threaded execution and it does not feature a JIT compiler. -\item[Jython] is an implementation of Python on top of the Java Virtual - Machine (JVM). Instead of a GIL, this interpreter uses fine-grained - locking for synchronisation. This enables true parallelism when - executing code on multiple threads. 
In addition, its integration - with the JVM provides it with a JIT compiler for faster execution. +\item[Jython] (version 2.7b1) is an implementation of Python on top of + the Java Virtual Machine (JVM). Instead of a GIL, this interpreter + uses fine-grained locking for synchronisation. This enables true + parallelism when executing code on multiple threads. In addition, its + integration with the JVM provides it with a JIT compiler for faster + execution. \end{description} Here, we will not go into detail about the integration of our STM @@ -973,6 +974,13 @@ because we can minimise non-determinism. We also do not want to depend on the capabilities of the JIT in these experiments. +We performed all benchmarks on a machine with a Intel Core i7-4770 +CPU~@3.40GHz (4 cores, 8 threads). There are 16~GiB of memory +available and we ran them under Ubuntu 14.04 with a Linux 3.13.0 +kernel. The STM system was compiled with a number of segments $N=4$ +and a maximum amount of memory of 1.5~GiB (both are configurable at +compile time). + % benchmarks with: pypy-c--Ojit-d1454093dd48+-14-05-26-17:16 % that's with stmgc 70c403598485 @@ -986,7 +994,7 @@ There are several sources of extra memory requirements in our STM system. First, we need to keep track of the state an object is in. We do this using flags and an overflow number. Both currently -fit in an additional header of 4~bytes per object. +fit in a single additional header of 4~bytes per object. Second, there are areas in memory private to each segment (see section \ref{sub:Setup}). The Nursery for example is 4~MiB in @@ -999,16 +1007,43 @@ written to at the same time, all pages would need to be privatised for all objects in the old object space. In that case we would need the total amount of memory required by old objects multiplied -by $N+1$ (incl. the sharing segment). +by $N+1$ (incl. the sharing segment). Pages get re-shared during +major collections if possible. 
\remi{maybe collect some statistics about pages privatised per segment} -\begin{itemize} -\item stm\_flags per object -\item read markers and other sections -\item private pages -\end{itemize} +\remi{The following discussion about richards mem usage does not +say that much... Also, RSS is not a good measure but it's hard to +get something better.} +In figure \ref{fig:richards_mem} we look at the memory usage of +one of our benchmarks called Richards\footnote{OS kernel simulation +benchmark}. The \emph{Resident Set Size} (RSS) shows the physical memory +assigned to the process. From it, we see that the process' memory +usage does not explode during the benchmark but actually stays pretty +much the same after start-up. Since it is the job of the OS to map +physical memory, this RSS number should be seen as a maximum. It is +possible that some of the memory is not required any more but still +assigned to our process. -maybe some memory usage graph over time +The \emph{GC managed memory} counts all memory used in the old object +space including the memory required for private pages. The sharp drops +in memory usage come from major collections that free old objects and +re-share pages. Again the overall memory usage stays the same and +we see that in this benchmark we have around 1 major collection every +second. + +For PyPy-STM the average memory requirement is 29~MiB and there are +$\sim 11$ major collections during the runtime. Normal PyPy with a GIL +grows its memory up to just 7~MiB and does not do a single major +collection in that time. + +We are missing a memory optimisation to store small objects in a more +compact way, which is done by a normal PyPy not using STM. +Additionally, since normal PyPy uses a GIL, it does not need to +duplicate any data structures like e.g. the Nursery for each +thread. This, the missing optimisation, and the additional memory +requirements for STM explained above account for this difference. 
+\remi{I don't know how much sense it makes to go deeper. We will +improve this in the future, but right now this is the overall picture.} \begin{figure}[h] \centering @@ -1020,10 +1055,13 @@ \subsection{Overhead Breakdown} +\remi{gs:segment prefix overhead is virtually none (maybe instruction cache)} +\remi{update numbers in pypy/TODO} + \begin{itemize} \item time taken by read \& write barriers \item time spent committing \& aborting (maybe with different numbers - of threads) + of threads; maybe split conflict detection and obj sync on commit) \item time in GC \end{itemize} From noreply at buildbot.pypy.org Tue May 27 14:34:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 14:34:24 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: remove rss comparison and try with some page privatisation statistics Message-ID: <20140527123424.A108A1C3505@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5274:477a6b8da6f8 Date: 2014-05-27 14:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/477a6b8da6f8/ Log: remove rss comparison and try with some page privatisation statistics diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1009,36 +1009,27 @@ need the total amount of memory required by old objects multiplied by $N+1$ (incl. the sharing segment). Pages get re-shared during major collections if possible. -\remi{maybe collect some statistics about pages privatised per segment} -\remi{The following discussion about richards mem usage does not -say that much... Also, RSS is not a good measure but it's hard to -get something better.} -In figure \ref{fig:richards_mem} we look at the memory usage of -one of our benchmarks called Richards\footnote{OS kernel simulation -benchmark}. The \emph{Resident Set Size} (RSS) shows the physical memory -assigned to the process. 
From it, we see that the process' memory -usage does not explode during the benchmark but actually stays pretty -much the same after start-up. Since it is the job of the OS to map -physical memory, this RSS number should be seen as a maximum. It is -possible that some of the memory is not required any more but still -assigned to our process. - -The \emph{GC managed memory} counts all memory used in the old object -space including the memory required for private pages. The sharp drops -in memory usage come from major collections that free old objects and -re-share pages. Again the overall memory usage stays the same and -we see that in this benchmark we have around 1 major collection every -second. +In figure \ref{fig:richards_mem} we look at the memory usage of one of +our benchmarks called Richards\footnote{OS kernel simulation +benchmark}. The \emph{GC managed memory} counts all memory used in the +old object space including the memory required for private pages. The +sharp drops in memory usage come from major collections that free old +objects and re-share pages. The average memory usage stays around +29~MiB and we see that in this benchmark we have around 1 major +collection every second. The \emph{page privatisation}, which +represents the percentage of used pages with at least one private +copy, shows the same spikes as the memory usage. These come directly from +re-sharing the pages. The maximum page privatisation is around $20\%$ +between major collections. Thus we can say that $~20\%$ of the old +objects get modified between collections in this benchmark. For PyPy-STM the average memory requirement is 29~MiB and there are $\sim 11$ major collections during the runtime. Normal PyPy with a GIL grows its memory up to just 7~MiB and does not do a single major -collection in that time. - -We are missing a memory optimisation to store small objects in a more -compact way, which is done by a normal PyPy not using STM. 
-Additionally, since normal PyPy uses a GIL, it does not need to +collection in that time. Compared to normal PyPy, we are missing a +memory optimisation to store small objects in a more compact +way. Additionally, since normal PyPy uses a GIL, it does not need to duplicate any data structures like e.g. the Nursery for each thread. This, the missing optimisation, and the additional memory requirements for STM explained above account for this difference. @@ -1048,13 +1039,14 @@ \begin{figure}[h] \centering \includegraphics[width=1\columnwidth]{plots/richards_mem.pdf} - \caption{Actual memory managed by the GC and resident set size + \caption{Actual memory managed by the GC and the page privatisation over time in Richards benchmark\label{fig:richards_mem}} \end{figure} \subsection{Overhead Breakdown} +\remi{do it on a non-jit build (see reason above)} \remi{gs:segment prefix overhead is virtually none (maybe instruction cache)} \remi{update numbers in pypy/TODO} diff --git a/talk/dls2014/paper/plots/plot_richards_mem.py b/talk/dls2014/paper/plots/plot_richards_mem.py --- a/talk/dls2014/paper/plots/plot_richards_mem.py +++ b/talk/dls2014/paper/plots/plot_richards_mem.py @@ -32,21 +32,26 @@ if not first_time: first_time = float(time) xs.append(float(time) - first_time) - real_mem, max_rss = mems.split("/") + real_mem, max_rss, page_util = mems.split("/") y1s.append(int(real_mem) / 1024. 
/ 1024) + y2s.append(float(page_util) * 100) -x2s = range(12) -y2s = [152304, 180060, 180428, - 180448, 180460, 180696, - 180124, 180552, 180584, - 180588, 180544, 180252] -y2s = map(lambda x: x / 1024., y2s) +# RSS: +# x2s = range(12) +# y2s = [152304, 180060, 180428, +# 180448, 180460, 180696, +# 180124, 180552, 180584, +# 180588, 180544, 180252] +# y2s = map(lambda x: x / 1024., y2s) -def plot_mems(ax): - ax.plot(xs, y1s, '-o', label="GC managed memory", - ms=2) - ax.plot(x2s, y2s, '-x', label="Resident Set Size (RSS)") +def plot_mems(ax, ax2): + print sum(y1s) / len(xs) + print sum(y2s) / len(xs) + a, = ax.plot(xs, y1s, 'b-') + b, = ax2.plot(xs, y2s, 'r-') + return ax.legend((a, b), + ('GC managed memory', 'Page privatisation')) def main(): @@ -57,12 +62,17 @@ ax = fig.add_subplot(111) - plot_mems(ax) - - ax.set_ylabel("Memory [MiB]") + ax.set_ylabel("Memory [MiB]", color='b') ax.set_xlabel("Runtime [s]") ax.set_xlim(-0.5, 11.5) - ax.set_ylim(0, 200) + ax.set_ylim(0, 50) + + ax2 = ax.twinx() + ax2.set_ylim(0, 100) + ax2.set_ylabel("\% of pages with $>1$ private copy", + color='r') + legend = plot_mems(ax, ax2) + #axs[0].set_ylim(0, len(x)) #ax.set_yticks([r+0.5 for r in range(len(logs))]) @@ -74,7 +84,6 @@ # major_formatter = matplotlib.ticker.FuncFormatter(label_format) # axs[0].xaxis.set_major_formatter(major_formatter) - legend = ax.legend(loc=5) #ax.set_title("Memory Usage in Richards") plt.draw() diff --git a/talk/dls2014/paper/plots/richards_mem.pdf b/talk/dls2014/paper/plots/richards_mem.pdf index 138f5784e44512fef2911dbf52122be8a88d49bd..17e9e4ae371aeb31f4e7578a35b7ab0ebb9a775c GIT binary patch [cut] From noreply at buildbot.pypy.org Tue May 27 19:43:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 May 2014 19:43:36 +0200 (CEST) Subject: [pypy-commit] pypy release-2.3.x: Fix: un-comment some assertion that is really checking an important Message-ID: <20140527174336.A39A51C010D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
release-2.3.x Changeset: r71743:6fb181888bbf Date: 2014-05-27 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6fb181888bbf/ Log: Fix: un-comment some assertion that is really checking an important property; and fix test_string:test_promote_string() by not calling make_result_of_lastop() indirectly from opimpl_str_guard_value(). diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -167,12 +167,11 @@ def make_result_of_lastop(self, resultbox): got_type = resultbox.type - # XXX disabled for now, conflicts with str_guard_value - #if not we_are_translated(): - # typeof = {'i': history.INT, - # 'r': history.REF, - # 'f': history.FLOAT} - # assert typeof[self.jitcode._resulttypes[self.pc]] == got_type + if not we_are_translated(): + typeof = {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT} + assert typeof[self.jitcode._resulttypes[self.pc]] == got_type target_index = ord(self.bytecode[self.pc-1]) if got_type == history.INT: self.registers_i[target_index] = resultbox @@ -1321,14 +1320,14 @@ self.metainterp.clear_exception() resbox = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) - if resbox is not None: - self.make_result_of_lastop(resbox) - # ^^^ this is done before handle_possible_exception() because we - # need the box to show up in get_list_of_active_boxes() if pure and self.metainterp.last_exc_value_box is None and resbox: resbox = self.metainterp.record_result_of_call_pure(resbox) exc = exc and not isinstance(resbox, Const) if exc: + if resbox is not None: + self.make_result_of_lastop(resbox) + # ^^^ this is done before handle_possible_exception() because we + # need the box to show up in get_list_of_active_boxes() self.metainterp.handle_possible_exception() else: self.metainterp.assert_no_exception() From noreply at buildbot.pypy.org Tue May 27 18:14:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 
27 May 2014 18:14:09 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Print the thread-local stm statistics also for the main thread when Message-ID: <20140527161409.ACF221C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71742:a4b2331aad16 Date: 2014-05-27 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/a4b2331aad16/ Log: Print the thread-local stm statistics also for the main thread when it finishes. diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -819,6 +819,12 @@ print >> f, '\t'+line print >> f, '\treturn error;' print >> f, '}' + print >> f + # also generate the tear-down code (empty except for stm statistics) + print >> f, 'void RPython_TeardownCode(void) {' + if database.with_stm: + print >> f, '\tpypy_stm_teardown();' + print >> f, '}' def gen_stm_prebuilt(f, database): from rpython.translator.c.primitive import name_signed diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -60,6 +60,7 @@ pypy_malloc_counters_results(); + RPython_TeardownCode(); return exitcode; memory_out: diff --git a/rpython/translator/c/src/entrypoint.h b/rpython/translator/c/src/entrypoint.h --- a/rpython/translator/c/src/entrypoint.h +++ b/rpython/translator/c/src/entrypoint.h @@ -9,5 +9,6 @@ #endif char *RPython_StartupCode(void); +void RPython_TeardownCode(void); int PYPY_MAIN_FUNCTION(int argc, char *argv[]); #endif /* PYPY_STANDALONE */ diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -71,6 +71,12 @@ pypy_stm_start_inevitable_if_not_atomic(); } +void pypy_stm_teardown(void) +{ + pypy_stm_unregister_thread_local(); + /* stm_teardown() not called here for now; it's 
mostly for tests */ +} + long pypy_stm_enter_callback_call(void) { if (pypy_stm_ready_atomic == 0) { diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -19,6 +19,7 @@ */ void pypy_stm_setup(void); +void pypy_stm_teardown(void); void pypy_stm_setup_prebuilt(void); /* generated into stm_prebuilt.c */ void pypy_stm_register_thread_local(void); /* generated into stm_prebuilt.c */ void pypy_stm_unregister_thread_local(void); /* generated into stm_prebuilt.c */ From noreply at buildbot.pypy.org Tue May 27 17:58:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 27 May 2014 17:58:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix: un-comment some assertion that is really checking an important Message-ID: <20140527155806.CB0641C010D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71741:2898a860e1fa Date: 2014-05-27 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/2898a860e1fa/ Log: Fix: un-comment some assertion that is really checking an important property; and fix test_string:test_promote_string() by not calling make_result_of_lastop() indirectly from opimpl_str_guard_value(). 
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -167,12 +167,11 @@ def make_result_of_lastop(self, resultbox): got_type = resultbox.type - # XXX disabled for now, conflicts with str_guard_value - #if not we_are_translated(): - # typeof = {'i': history.INT, - # 'r': history.REF, - # 'f': history.FLOAT} - # assert typeof[self.jitcode._resulttypes[self.pc]] == got_type + if not we_are_translated(): + typeof = {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT} + assert typeof[self.jitcode._resulttypes[self.pc]] == got_type target_index = ord(self.bytecode[self.pc-1]) if got_type == history.INT: self.registers_i[target_index] = resultbox @@ -1321,14 +1320,14 @@ self.metainterp.clear_exception() resbox = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) - if resbox is not None: - self.make_result_of_lastop(resbox) - # ^^^ this is done before handle_possible_exception() because we - # need the box to show up in get_list_of_active_boxes() if pure and self.metainterp.last_exc_value_box is None and resbox: resbox = self.metainterp.record_result_of_call_pure(resbox) exc = exc and not isinstance(resbox, Const) if exc: + if resbox is not None: + self.make_result_of_lastop(resbox) + # ^^^ this is done before handle_possible_exception() because we + # need the box to show up in get_list_of_active_boxes() self.metainterp.handle_possible_exception() else: self.metainterp.assert_no_exception() From noreply at buildbot.pypy.org Tue May 27 16:39:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 16:39:56 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: again change the metric for explaining memory usage (not sure what's best) Message-ID: <20140527143956.CFA231C010D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5275:082595a5aacc Date: 2014-05-27 16:41 +0200 
http://bitbucket.org/pypy/extradoc/changeset/082595a5aacc/ Log: again change the metric for explaining memory usage (not sure what's best) diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -377,11 +377,9 @@ propagated to all segments. \begin{figure*}[t] - \begin{centering} - \includegraphics[scale=0.8]{\string"segment addressing\string".pdf} - \par\end{centering} - - \protect\caption{Segment Addressing\label{fig:Segment-Addressing}} + \centering + \includegraphics[scale=0.8]{\string"segment addressing\string".pdf} + \caption{Segment Addressing\label{fig:Segment-Addressing}} \end{figure*} @@ -404,11 +402,9 @@ require one page of physical memory for all of them. \begin{figure}[h] - \begin{centering} - \includegraphics[scale=0.8]{\string"mmap pages\string".pdf} - \par\end{centering} - - \protect\caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} + \centering + \includegraphics[scale=0.8]{\string"mmap pages\string".pdf} + \caption{\texttt{mmap()} Page Mapping\label{fig:mmap()-Page-Mapping}} \end{figure} @@ -445,13 +441,11 @@ memory efficient again. \begin{figure}[h] - \begin{centering} - \includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} - \par\end{centering} - - \protect\caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to - segment~0, fully shared memory configuration. (III) privatise single - pages.\label{fig:Page-Remapping}} + \centering + \includegraphics[width=1\columnwidth]{\string"page remapping\string".pdf} + \caption{Page Remapping: (I) after \texttt{mmap()}. (II) remap all pages to + segment~0, fully shared memory configuration. 
(III) privatise single + pages.\label{fig:Page-Remapping}} \end{figure} @@ -641,11 +635,9 @@ \begin{figure*}[t] - \begin{centering} - \includegraphics[scale=0.8]{\string"segment layout\string".pdf} - \par\end{centering} - - \protect\caption{Segment Layout\label{fig:Segment-Layout}} + \centering + \includegraphics[scale=0.8]{\string"segment layout\string".pdf} + \caption{Segment Layout\label{fig:Segment-Layout}} \end{figure*} @@ -1003,12 +995,12 @@ space. Thus, it can be at most the total size of memory used by old objects divided by 16. -Third, since we do copy on write, if all objects were to be -written to at the same time, all pages would need to be privatised -for all objects in the old object space. In that case we would -need the total amount of memory required by old objects multiplied -by $N+1$ (incl. the sharing segment). Pages get re-shared during -major collections if possible. +Third, since we do copy on write, if all objects were to be written to +at the same time, all pages would need to be privatised for all +objects in the old object space. In that case we would need the total +amount of memory required by old objects multiplied by $N+1$ +(incl. the sharing segment). During major collections we re-share the +pages if possible. In figure \ref{fig:richards_mem} we look at the memory usage of one of our benchmarks called Richards\footnote{OS kernel simulation @@ -1017,12 +1009,21 @@ sharp drops in memory usage come from major collections that free old objects and re-share pages. The average memory usage stays around 29~MiB and we see that in this benchmark we have around 1 major -collection every second. The \emph{page privatisation}, which -represents the percentage of used pages with at least one private -copy, shows the same spikes as the memory usage. These come directly from -re-sharing the pages. The maximum page privatisation is around $20\%$ -between major collections. 
Thus we can say that $~20\%$ of the old -objects get modified between collections in this benchmark. +collection every second. + +The \emph{page privatisation}, which represents the ratio between +private pages and shared pages, gives us an idea about how many +private copies exist for each shared page. If the ratio is $>1.0$, it +means that on average we have more than one private copy for each +shared page. In the worst case described above, this number would +reach $N$ and means we need $N\times$ the memory for the private +pages alone. In this benchmark, we see the same spikes as the memory +usage. These come directly from re-sharing the pages. The maximum page +privatisation is around $0.5$ between major collections, thus the +private pages are responsible for a $50\%$ increase in the required +memory. Since the spikes in the GC managed memory line actually +show increases by $~80\%$, it means that the rest comes from actual +garbage objects that were collected. For PyPy-STM the average memory requirement is 29~MiB and there are $\sim 11$ major collections during the runtime. Normal PyPy with a GIL @@ -1044,6 +1045,7 @@ \end{figure} + \subsection{Overhead Breakdown} \remi{do it on a non-jit build (see reason above)} diff --git a/talk/dls2014/paper/plots/plot_richards_mem.py b/talk/dls2014/paper/plots/plot_richards_mem.py --- a/talk/dls2014/paper/plots/plot_richards_mem.py +++ b/talk/dls2014/paper/plots/plot_richards_mem.py @@ -34,7 +34,7 @@ xs.append(float(time) - first_time) real_mem, max_rss, page_util = mems.split("/") y1s.append(int(real_mem) / 1024. 
/ 1024) - y2s.append(float(page_util) * 100) + y2s.append(float(page_util)) # RSS: # x2s = range(12) @@ -48,8 +48,8 @@ def plot_mems(ax, ax2): print sum(y1s) / len(xs) print sum(y2s) / len(xs) - a, = ax.plot(xs, y1s, 'b-') - b, = ax2.plot(xs, y2s, 'r-') + a, = ax.plot(xs, y1s, 'b-o', ms=3) + b, = ax2.plot(xs, y2s, 'r-x', ms=3) return ax.legend((a, b), ('GC managed memory', 'Page privatisation')) @@ -64,12 +64,12 @@ ax.set_ylabel("Memory [MiB]", color='b') ax.set_xlabel("Runtime [s]") - ax.set_xlim(-0.5, 11.5) ax.set_ylim(0, 50) ax2 = ax.twinx() - ax2.set_ylim(0, 100) - ax2.set_ylabel("\% of pages with $>1$ private copy", + ax.set_xlim(-0.5, 11.8) + ax2.set_ylim(0, 1) + ax2.set_ylabel("Ratio = ${private~pages}\over{shared~pages}$", color='r') legend = plot_mems(ax, ax2) diff --git a/talk/dls2014/paper/plots/richards_mem.pdf b/talk/dls2014/paper/plots/richards_mem.pdf index 17e9e4ae371aeb31f4e7578a35b7ab0ebb9a775c..c2e6b5c0924fe38ec35ab1a467cda4f6c3810450 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue May 27 18:38:57 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 27 May 2014 18:38:57 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: re-run with no-jit build and add scaling micro-bench Message-ID: <20140527163857.AA94E1C3085@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5276:d74b99852f5a Date: 2014-05-27 18:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/d74b99852f5a/ Log: re-run with no-jit build and add scaling micro-bench diff --git a/talk/dls2014/paper/TODO b/talk/dls2014/paper/TODO --- a/talk/dls2014/paper/TODO +++ b/talk/dls2014/paper/TODO @@ -1,3 +1,4 @@ +* discuss removal GIL ruby paper * measure things ** memory: residual & GC-numbers ** overhead: breakdown (maybe also with multiple threads) diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1019,14 +1019,14 @@ reach $N$ and means we need $N\times$ 
the memory for the private pages alone. In this benchmark, we see the same spikes as the memory usage. These come directly from re-sharing the pages. The maximum page -privatisation is around $0.5$ between major collections, thus the -private pages are responsible for a $50\%$ increase in the required +privatisation is around $0.9$ between major collections, thus the +private pages are responsible for a $90\%$ increase in the required memory. Since the spikes in the GC managed memory line actually -show increases by $~80\%$, it means that the rest comes from actual +show increases by $~100\%$, it means that the rest comes from actual garbage objects that were collected. -For PyPy-STM the average memory requirement is 29~MiB and there are -$\sim 11$ major collections during the runtime. Normal PyPy with a GIL +For PyPy-STM the average memory requirement is 21~MiB and there are +$\sim 10$ major collections during the runtime. Normal PyPy with a GIL grows its memory up to just 7~MiB and does not do a single major collection in that time. Compared to normal PyPy, we are missing a memory optimisation to store small objects in a more compact @@ -1034,6 +1034,8 @@ duplicate any data structures like e.g. the Nursery for each thread. This, the missing optimisation, and the additional memory requirements for STM explained above account for this difference. +We expect to improve this aspect in the future, in this paper we +want to focus first on performance. \remi{I don't know how much sense it makes to go deeper. 
We will improve this in the future, but right now this is the overall picture.} @@ -1062,7 +1064,29 @@ \subsection{Scaling} -maybe some simple micro benchmarks with adaptable conflict rate +To asses how well the STM system scales on its own (without any real +workload), we execute the following loop on 1 to 4 threads: +\begin{lstlisting} +def workload(): + i = 20000000 + while i: + i -= 1 +\end{lstlisting} + +For the results in figure \ref{fig:scaling}, we averaged +over 5 runs and normalised the average runtimes to the +time it took on a single thread. From this we see that there +is additional overhead introduced by each thread ($13\%$ +for all 4 threads together). + +\remi{what we don't show is by how much this overhead is influenced +by allocations} + +\begin{figure}[h] + \centering + \includegraphics[width=1\columnwidth]{plots/scaling.pdf} + \caption{Scalability of the STM system\label{fig:scaling}} +\end{figure} \subsection{Real-World Benchmarks\label{sec:real-world-bench}} diff --git a/talk/dls2014/paper/plots/bench_scaling.py b/talk/dls2014/paper/plots/bench_scaling.py new file mode 100644 --- /dev/null +++ b/talk/dls2014/paper/plots/bench_scaling.py @@ -0,0 +1,20 @@ +import thread +import sys + + +lock = thread.allocate_lock() + +def workload(): + i = 20000000 + while i: + i -= 1 + lock.release() + +running = range(int(sys.argv[1])) + +lock.acquire() +for i in running[:]: + thread.start_new_thread(workload, ()) +lock.acquire() +print "done" +#import os; os._exit(0) diff --git a/talk/dls2014/paper/plots/plot_richards_mem.py b/talk/dls2014/paper/plots/plot_richards_mem.py --- a/talk/dls2014/paper/plots/plot_richards_mem.py +++ b/talk/dls2014/paper/plots/plot_richards_mem.py @@ -67,8 +67,8 @@ ax.set_ylim(0, 50) ax2 = ax.twinx() - ax.set_xlim(-0.5, 11.8) - ax2.set_ylim(0, 1) + ax.set_xlim(-0.5, 9.8) + ax2.set_ylim(0, 1.5) ax2.set_ylabel("Ratio = ${private~pages}\over{shared~pages}$", color='r') legend = plot_mems(ax, ax2) diff --git 
a/talk/dls2014/paper/plots/plot_scaling.py b/talk/dls2014/paper/plots/plot_scaling.py new file mode 100755 --- /dev/null +++ b/talk/dls2014/paper/plots/plot_scaling.py @@ -0,0 +1,106 @@ +#!/usr/bin/python + +# obtained with time on +# pypy-c --jit off bench_scaling.py [1-4] + + +import matplotlib +import os +import sys +matplotlib.use('gtkagg') + +from matplotlib import rc +#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) +## for Palatino and other serif fonts use: +rc('font',**{'family':'serif','serif':['Palatino']}) +rc('text', usetex=True) + +args = None +import matplotlib.pyplot as plt +# import pprint - slow as hell + +xs = range(1,5) +ys = [[1.73, 1.74, 1.73, 1.73, 1.74], + [1.75, 1.77, 1.78, 1.75, 1.75], + [1.8, 1.79, 1.76, 1.76, 1.79], + [1.82, 2.1, 1.84, 1.9, 2.13]] + + + +def plot_mems(ax): + import numpy as np + y = [] + yerr = [] + opt_y = [1.0] * len(xs) + first_time = np.mean(ys[0]) + for x, d in zip(xs, ys): + normalized = map(lambda x:x/first_time, d) + y.append(np.mean(normalized)) + yerr.append(np.std(normalized)) + + print y + ax.errorbar(xs, y, yerr=yerr, + label="STM") + ax.plot(xs, opt_y, label="optimal") + return ax.legend(loc=4) + + +def main(): + global fig + + print "Draw..." + fig = plt.figure() + + ax = fig.add_subplot(111) + + ax.set_ylabel("Runtime normalized to 1 thread") + ax.set_xlabel("Threads") + ax.set_ylim(0, 1.5) + ax.set_xlim(0, 5) + + legend = plot_mems(ax) + + + #axs[0].set_ylim(0, len(x)) + #ax.set_yticks([r+0.5 for r in range(len(logs))]) + #ax.set_yticklabels(range(1, len(logs)+1)) + #axs[0].set_xticks([]) + + # def label_format(x, pos): + # return "%.2f" % (abs((x - left) * 1e-6), ) + # major_formatter = matplotlib.ticker.FuncFormatter(label_format) + # axs[0].xaxis.set_major_formatter(major_formatter) + + #ax.set_title("Memory Usage in Richards") + + plt.draw() + #plt.show() + print "Drawn." 
+ + file_name = "scaling.pdf" + plt.savefig(file_name, format='pdf', + bbox_extra_artists=(legend,), + bbox_inches='tight', pad_inches=0) + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description='Plot stm log files') + parser.add_argument('--figure-size', default='6x4', + help='set figure size in inches: format=6x4') + parser.add_argument('--font-size', default='10.0', + help='set font size in pts: 10.0') + parser.add_argument('--png-dpi', default='300', + help='set dpi of png output: 300') + + + args = parser.parse_args() + matplotlib.rcParams.update( + {'figure.figsize': tuple(map(int, args.figure_size.split('x'))), + 'font.size': float(args.font_size), + 'savefig.dpi': int(args.png_dpi), + }) + + + main() diff --git a/talk/dls2014/paper/plots/richards_mem.pdf b/talk/dls2014/paper/plots/richards_mem.pdf index c2e6b5c0924fe38ec35ab1a467cda4f6c3810450..ac151b083994c0792d0a271a1b6bd3f9cb688cfc GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/scaling.pdf b/talk/dls2014/paper/plots/scaling.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3b5b43b271d3c03d3edf230a38d24389b7bfdb6c GIT binary patch [cut] From noreply at buildbot.pypy.org Tue May 27 19:59:24 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 27 May 2014 19:59:24 +0200 (CEST) Subject: [pypy-commit] pypy default: create noneify() method to handle unions with s_None Message-ID: <20140527175924.C36EB1C3288@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71744:f02e2ea76f2e Date: 2014-05-27 18:57 +0100 http://bitbucket.org/pypy/pypy/changeset/f02e2ea76f2e/ Log: create noneify() method to handle unions with s_None diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -768,6 +768,22 @@ # mixing Nones with other objects +class __extend__(pairtype(SomeObject, SomeNone)): + def union((obj, none)): + return 
obj.noneify() + +class __extend__(pairtype(SomeNone, SomeObject)): + def union((none, obj)): + return obj.noneify() + +class __extend__(pairtype(SomeImpossibleValue, SomeNone)): + def union((imp1, none)): + return s_None + +class __extend__(pairtype(SomeNone, SomeImpossibleValue)): + def union((none, imp2)): + return s_None + def _make_none_union(classname, constructor_args='', glob=None): if glob is None: glob = globals() diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -122,6 +122,9 @@ def can_be_none(self): return True + def noneify(self): + raise UnionError(self, s_None) + def nonnoneify(self): return self @@ -476,7 +479,12 @@ return self.can_be_None def nonnoneify(self): - return SomePBC(self.descriptions, can_be_None=False) + return SomePBC(self.descriptions, can_be_None=False, + subset_of=self.subset_of) + + def noneify(self): + return SomePBC(self.descriptions, can_be_None=True, + subset_of=self.subset_of) def fmt_descriptions(self, pbis): if hasattr(self, 'const'): From noreply at buildbot.pypy.org Tue May 27 22:10:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 27 May 2014 22:10:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k: oops, avoid we_are_translated at module scope Message-ID: <20140527201055.DAA461C010D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71745:9a4c6f4f53b6 Date: 2014-05-27 13:07 -0700 http://bitbucket.org/pypy/pypy/changeset/9a4c6f4f53b6/ Log: oops, avoid we_are_translated at module scope diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -11,7 +11,6 @@ else: MAXUNICODE = 0xffff -NARROW_HOST = not we_are_translated() and sys.maxunicode == 0xFFFF BYTEORDER = sys.byteorder # python 2.7 has a preview of py3k behavior, so those functions @@ -64,7 +63,7 @@ if MAXUNICODE > 0xFFFF: def code_to_unichr(code): - if NARROW_HOST: + if 
is_narrow_host(): # Host CPython is narrow build, generate surrogates return unichr_returns_surrogate(code) else: @@ -84,6 +83,9 @@ result.append(hi) result.append(lo) +def is_narrow_host(): + return not we_are_translated() and sys.maxunicode == 0xFFFF + def default_unicode_error_decode(errors, encoding, msg, s, startingpos, endingpos): if errors == 'replace': @@ -336,7 +338,7 @@ # Check for low surrogate and combine the two to # form a UCS4 value if ((allow_surrogates or MAXUNICODE < 65536 - or NARROW_HOST) and + or is_narrow_host()) and ch <= 0xDBFF and 0xDC00 <= ch2 <= 0xDFFF): ch3 = ((ch - 0xD800) << 10 | (ch2 - 0xDC00)) + 0x10000 assert ch3 >= 0 @@ -1344,7 +1346,7 @@ # The following logic is enabled only if MAXUNICODE == 0xffff, or # for testing on top of a host Python where sys.maxunicode == 0xffff - if ((MAXUNICODE < 65536 or NARROW_HOST) + if ((MAXUNICODE < 65536 or is_narrow_host()) and 0xD800 <= oc < 0xDC00 and pos + 1 < size): # Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes pos += 1 From noreply at buildbot.pypy.org Tue May 27 22:10:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 27 May 2014 22:10:57 +0200 (CEST) Subject: [pypy-commit] pypy py3k: assume narrow build behavior here when hosted on a narrow python Message-ID: <20140527201057.56AA41C010D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71746:1e851feb2d8c Date: 2014-05-27 13:08 -0700 http://bitbucket.org/pypy/pypy/changeset/1e851feb2d8c/ Log: assume narrow build behavior here when hosted on a narrow python diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -545,7 +545,7 @@ raises(UnicodeEncodeError, '\ud800'.encode, 'utf-8') raises(UnicodeEncodeError, '\udc00'.encode, 'utf-8') raises(UnicodeEncodeError, '\udc00!'.encode, 'utf-8') - if sys.maxunicode > 0xFFFF: + if sys.maxunicode > 
0xFFFF and len(chr(0x10000)) == 1: raises(UnicodeEncodeError, '\ud800\udc02'.encode, 'utf-8') raises(UnicodeEncodeError, '\ud84d\udc56'.encode, 'utf-8') raises(UnicodeEncodeError, ('\ud800\udc02'*1000).encode, 'utf-8') From noreply at buildbot.pypy.org Tue May 27 22:10:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 27 May 2014 22:10:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: skip these when hosted on a narrow build, they require a fully fledged wide build Message-ID: <20140527201058.B65F31C010D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71747:9af57c80e37f Date: 2014-05-27 13:09 -0700 http://bitbucket.org/pypy/pypy/changeset/9af57c80e37f/ Log: skip these when hosted on a narrow build, they require a fully fledged wide build diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -378,6 +378,7 @@ assert self.almost_equal(complex(real=float2(17.), imag=float2(23.)), 17+23j) raises(TypeError, complex, float2(None)) + @py.test.mark.skipif("not config.option.runappdirect and sys.maxunicode == 0xffff") def test_constructor_unicode(self): b1 = '\N{MATHEMATICAL BOLD DIGIT ONE}' # 𝟏 b2 = '\N{MATHEMATICAL BOLD DIGIT TWO}' # 𝟐 diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -471,6 +471,7 @@ else: assert False, 'did not raise' + @py.test.mark.skipif("not config.option.runappdirect and sys.maxunicode == 0xffff") def test_float_from_unicode(self): s = '\U0001D7CF\U0001D7CE.4' # 𝟏𝟎.4 assert float(s) == 10.4 diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -360,6 +360,7 @@ b = 
A(5).real assert type(b) is int + @py.test.mark.skipif("not config.option.runappdirect and sys.maxunicode == 0xffff") def test_long_from_unicode(self): raises(ValueError, int, '123L') assert int('L', 22) == 21 diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -11,7 +11,10 @@ w_s = space.wrap(u"\N{EM SPACE}-3\N{EN SPACE}") s2 = unicode_to_decimal_w(space, w_s) assert s2 == " -3 " - # + + @py.test.mark.skipif("not config.option.runappdirect and sys.maxunicode == 0xffff") + def test_unicode_to_decimal_w_wide(self, space): + from pypy.objspace.std.unicodeobject import unicode_to_decimal_w w_s = space.wrap(u'\U0001D7CF\U0001D7CE') # 𝟏𝟎 s2 = unicode_to_decimal_w(space, w_s) assert s2 == "10" @@ -238,6 +241,8 @@ # single surrogate character assert not "\ud800".isprintable() + @py.test.mark.skipif("not config.option.runappdirect and sys.maxunicode == 0xffff") + def test_isprintable_wide(self): assert '\U0001F46F'.isprintable() # Since unicode 6.0 assert not '\U000E0020'.isprintable() From noreply at buildbot.pypy.org Tue May 27 22:11:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 27 May 2014 22:11:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: thread -> _thread Message-ID: <20140527201100.2847A1C010D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71748:c405b0cd87fb Date: 2014-05-27 13:09 -0700 http://bitbucket.org/pypy/pypy/changeset/c405b0cd87fb/ Log: thread -> _thread diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -208,7 +208,7 @@ else: raise Exception("could unexpectedly start 1000 threads") # safety: check that we can start a new thread here - thread.start_new_thread(lambda: None, ()) + _thread.start_new_thread(lambda: None, 
()) def test_stack_size(self): import _thread From noreply at buildbot.pypy.org Tue May 27 22:23:48 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 27 May 2014 22:23:48 +0200 (CEST) Subject: [pypy-commit] pypy default: kill _make_none_union() Message-ID: <20140527202348.0CA371C3434@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71749:a57283e9fbd9 Date: 2014-05-27 21:23 +0100 http://bitbucket.org/pypy/pypy/changeset/a57283e9fbd9/ Log: kill _make_none_union() diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -2,12 +2,11 @@ Binary operations between SomeValues. """ -import py import operator from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, - SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeSingleFloat, @@ -784,29 +783,6 @@ def union((none, imp2)): return s_None -def _make_none_union(classname, constructor_args='', glob=None): - if glob is None: - glob = globals() - loc = locals() - source = py.code.Source(""" - class __extend__(pairtype(%(classname)s, SomeNone)): - def union((obj, none)): - return %(classname)s(%(constructor_args)s) - - class __extend__(pairtype(SomeNone, %(classname)s)): - def union((none, obj)): - return %(classname)s(%(constructor_args)s) - """ % loc) - exec source.compile() in glob - -_make_none_union('SomeInstance', 'classdef=obj.classdef, can_be_None=True') -_make_none_union('SomeString', 'no_nul=obj.no_nul, can_be_None=True') -_make_none_union('SomeUnicodeString', 'can_be_None=True') -_make_none_union('SomeList', 'obj.listdef') -_make_none_union('SomeOrderedDict', 
'obj.dictdef') -_make_none_union('SomeDict', 'obj.dictdef') -_make_none_union('SomeWeakRef', 'obj.classdef') - class __extend__(pairtype(SomePBC, SomeObject)): def getitem((pbc, o)): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -261,11 +261,17 @@ "Stands for an object which is known to be a string." knowntype = str + def noneify(self): + return SomeString(can_be_None=True, no_nul=self.no_nul) + class SomeUnicodeString(SomeStringOrUnicode): "Stands for an object which is known to be an unicode string" knowntype = unicode + def noneify(self): + return SomeUnicodeString(can_be_None=True, no_nul=self.no_nul) + class SomeByteArray(SomeStringOrUnicode): immutable = False @@ -316,6 +322,9 @@ def can_be_none(self): return True + def noneify(self): + return SomeList(self.listdef) + class SomeTuple(SomeObject): "Stands for a tuple of known length." @@ -361,6 +370,9 @@ else: return '{...%s...}' % (len(const),) + def noneify(self): + return type(self)(self.dictdef) + class SomeOrderedDict(SomeDict): try: from collections import OrderedDict as knowntype @@ -420,6 +432,9 @@ def nonnoneify(self): return SomeInstance(self.classdef, can_be_None=False) + def noneify(self): + return SomeInstance(self.classdef, can_be_None=True) + class SomePBC(SomeObject): """Stands for a global user instance, built prior to the analysis, @@ -592,6 +607,9 @@ # 'classdef' is None for known-to-be-dead weakrefs. 
self.classdef = classdef + def noneify(self): + return SomeWeakRef(self.classdef) + # ____________________________________________________________ diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -446,6 +446,9 @@ def rtyper_makekey(self): return self.__class__, + def noneify(self): + return self + class SomeUnicodeBuilder(SomeObject): def method_append(self, s_str): @@ -483,6 +486,9 @@ def rtyper_makekey(self): return self.__class__, + def noneify(self): + return self + class BaseEntry(object): def compute_result_annotation(self, s_init_size=None): @@ -506,29 +512,6 @@ use_unicode = True -class __extend__(pairtype(SomeStringBuilder, SomePBC)): - def union((sb, p)): - assert p.const is None - return SomeStringBuilder() - - -class __extend__(pairtype(SomePBC, SomeStringBuilder)): - def union((p, sb)): - assert p.const is None - return SomeStringBuilder() - - -class __extend__(pairtype(SomeUnicodeBuilder, SomePBC)): - def union((sb, p)): - assert p.const is None - return SomeUnicodeBuilder() - - -class __extend__(pairtype(SomePBC, SomeUnicodeBuilder)): - def union((p, sb)): - assert p.const is None - return SomeUnicodeBuilder() - #___________________________________________________________________ # Support functions for SomeString.no_nul diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -1,6 +1,5 @@ from rpython.annotator import model as annmodel from rpython.tool.pairtype import pairtype -from rpython.annotator.binaryop import _make_none_union, SomeNone # SomeNone needed by _make_none_union from rpython.annotator.bookkeeper import getbookkeeper from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.annlowlevel import cachedtype @@ -216,6 +215,9 @@ def can_be_none(self): return self.controller.can_be_None + def noneify(self): + return 
SomeControlledInstance(self.s_real_obj, self.controller) + def rtyper_makerepr(self, rtyper): from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) @@ -224,7 +226,6 @@ real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller -_make_none_union("SomeControlledInstance", "obj.s_real_obj, obj.controller", globals()) class __extend__(SomeControlledInstance): From noreply at buildbot.pypy.org Wed May 28 09:17:32 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 May 2014 09:17:32 +0200 (CEST) Subject: [pypy-commit] benchmarks default: some use of hint_commit_soon Message-ID: <20140528071732.D2F5F1C06C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r261:0d81c9b1ec8e Date: 2014-05-28 09:18 +0200 http://bitbucket.org/pypy/benchmarks/changeset/0d81c9b1ec8e/ Log: some use of hint_commit_soon diff --git a/multithread/bottle/app.py b/multithread/bottle/app.py --- a/multithread/bottle/app.py +++ b/multithread/bottle/app.py @@ -1,4 +1,5 @@ -from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, hint_commit_soon) from BaseHTTPServer import HTTPServer import threading, time @@ -55,7 +56,12 @@ @bottle.route('/') def index(): - time.sleep(0.5) + with atomic: + i = 10000 + res = "" + while i: + i -= 1 + res += str(i) return "hi from " + threading.currentThread().getName() diff --git a/multithread/btree/btree.py b/multithread/btree/btree.py --- a/multithread/btree/btree.py +++ b/multithread/btree/btree.py @@ -1,6 +1,8 @@ # https://github.com/MartinThoma/algorithms/tree/master/datastructures -from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon, print_abort_info) import time, threading 
import random @@ -203,6 +205,7 @@ ancestors.append((node, index)) node, index = ancestors.pop() node.insert(index, item, ancestors) + hint_commit_soon() return True def remove(self, item): @@ -211,6 +214,7 @@ if self._present(item, ancestors): node, index = ancestors.pop() node.remove(index, ancestors) + hint_commit_soon() # else: # raise ValueError("%r not in %s" % (item, self.__class__.__name__)) @@ -307,7 +311,8 @@ ###################################################################### ###################################################################### -OPS = [BTree.__contains__] * 98 + [BTree.insert, BTree.remove] +CONFLICTING = [BTree.insert, BTree.remove] +OPS = [BTree.__contains__] * 98 + CONFLICTING ITEM_RANGE = 10000 @@ -319,8 +324,12 @@ for _ in xrange(ops): op = r.choice(OPS) elem = r.randint(1, ITEM_RANGE) + # cflts = op in CONFLICTING + # if cflts: + # hint_commit_soon() with atomic: op(tree, elem) + #print_abort_info(0.00001) print "task ended" diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -3,13 +3,16 @@ import thread, atexit, sys, time try: - from atomic import atomic, getsegmentlimit, print_abort_info + from atomic import (atomic, getsegmentlimit, print_abort_info, + hint_commit_soon) except: atomic = RLock() def getsegmentlimit(): return 1 def print_abort_info(tm=0.0): pass + def hint_commit_soon(): + pass class TLQueue_concurrent(object): @@ -140,7 +143,9 @@ def _task(self, func, *args, **kwargs): with self._cond: try: + hint_commit_soon() self._result = func(*args, **kwargs) + hint_commit_soon() except Exception as e: self._exception = e finally: diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -2,7 +2,9 @@ # Date: 14.03.2013 from math import sqrt, pi -from common.abstract_threading 
import atomic, Future, set_thread_pool, ThreadPool +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + print_abort_info, hint_commit_soon) import time AMBIENT = 0.1 @@ -133,6 +135,7 @@ (Vector(x/50.0-5,y/50.0-5,0)-cameraPos).normal()) col = trace(ray, objs, lightSource, 10) line[y] = (col.x + col.y + col.z) / 3.0 + #print_abort_info(0.00001) return x @@ -142,7 +145,6 @@ - def run(ths=8, w=1024, h=1024): ths = int(ths) w = int(w) diff --git a/multithread/skiplist/skiplist.py b/multithread/skiplist/skiplist.py --- a/multithread/skiplist/skiplist.py +++ b/multithread/skiplist/skiplist.py @@ -1,6 +1,8 @@ # https://github.com/kunigami/blog-examples/tree/master/2012-09-23-skip-list -from common.abstract_threading import atomic, Future, set_thread_pool, ThreadPool +from common.abstract_threading import (atomic, Future, + set_thread_pool, ThreadPool, + print_abort_info, hint_commit_soon) import time, threading import random @@ -52,7 +54,7 @@ def insert(self, elem): node = SkipNode(self.randomHeight(), elem) - # conflicts with every find(): + # conflicts with everything else: self.maxHeight = max(self.maxHeight, len(node.next)) while len(self.head.next) < len(node.next): @@ -64,16 +66,19 @@ node.next[i] = update[i].next[i] update[i].next[i] = node self.len += 1 + hint_commit_soon() def remove(self, elem): update = self.updateList(elem) x = self.find(elem, update) if x != None: + # conflicts with everything else: for i in reversed(range(len(x.next))): update[i].next[i] = x.next[i] if self.head.next[i] == None: self.maxHeight -= 1 self.len -= 1 + hint_commit_soon() def printList(self): for i in range(len(self.head.next)-1, -1, -1): @@ -84,9 +89,9 @@ print '' - -OPS = [SkipList.find] * 98 + [SkipList.insert, SkipList.remove] -ITEM_RANGE = 10000 +CONFLICTING = [SkipList.insert, SkipList.remove] +OPS = [SkipList.find] * 98 + CONFLICTING +ITEM_RANGE = 1000000 def task(id, slist, ops): print "start task with %s ops" % ops @@ -97,9 +102,13 @@ 
for _ in xrange(ops): op = r.choice(OPS) elem = r.randint(1, ITEM_RANGE) + # if op in CONFLICTING: + # hint_commit_soon() with atomic: op(slist, elem) + #print_abort_info(0.0001) + print "task ended" From noreply at buildbot.pypy.org Wed May 28 14:59:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 May 2014 14:59:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add first (bad) results for JIT in our benchmarks Message-ID: <20140528125934.11A6D1C350E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5277:2308d810d1bf Date: 2014-05-28 15:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/2308d810d1bf/ Log: add first (bad) results for JIT in our benchmarks diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -950,7 +950,7 @@ Here, we will not go into detail about the integration of our STM system with PyPy's JIT. In fact, we will disable it for all benchmarks -except those in section \ref{sec:real-world-bench}. We would like to +except those in section \ref{sec:performance-bench}. We would like to regard it as a simple performance enhancement, but that is not what happens in reality. First, since the JIT is a tracing JIT\remi{explain?} running in multiple threads, it may compile @@ -962,7 +962,7 @@ exposes the overhead of STM more by speeding up all the rest. Overall, we believe that disabling it on all benchmarks except the -real-world benchmarks in section \ref{sec:real-world-bench} is better +performance benchmarks in section \ref{sec:performance-bench} is better because we can minimise non-determinism. We also do not want to depend on the capabilities of the JIT in these experiments. @@ -1089,7 +1089,13 @@ \end{figure} -\subsection{Real-World Benchmarks\label{sec:real-world-bench}} +\subsection{Performance Benchmarks\label{sec:performance-bench}} + +\remi{For performance we first look at no-JIT behaviour of STM. 
Since +we cannot compete well even with CPython, we later show JIT benchmarks +where we see the unstable performance but also that we can still scale. +(with more work we can use our STM system to parallelise jitted code +too)} more real benchmarks comparing multiple implementations: \begin{itemize}[noitemsep] @@ -1102,6 +1108,18 @@ \end{itemize} +% TODO: Jython +\remi{Some benchmarks (figure \ref{fig:performance-jit} with enabled +JIT show that we can be competitive with the other solutions. It also +shows that more work is needed in that area to make performance more +stable.} + +\begin{figure}[h] + \centering + \includegraphics[width=1\columnwidth]{plots/performance.pdf} + \caption{Comparing runtime between interpreters with JIT\label{fig:performance-jit}} +\end{figure} + \section{Related Work} diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf new file mode 100644 index 0000000000000000000000000000000000000000..99fa8230f1a046efb2fe92225955fc00178fdaa0 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py new file mode 100755 --- /dev/null +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -0,0 +1,227 @@ +#!/usr/bin/python + +# benchmarks-repo at 0d81c9b1ec8e + +# for now: avg & stddev of the best + +# pypy-c-paper-jit bench.py -k5 raytrace/raytrace.py 1-4 +# pypy-c-paper-jit bench.py -k5 btree/btree.py 1-4 +# pypy-c-paper-jit bench.py -k5 skiplist/skiplist.py 1-4 +# pypy-c-paper-jit bench.py -k5 threadworms/threadworms.py 1-4 +# pypy-c-paper-jit bench.py -k5 mandelbrot/mandelbrot.py 1-4 64 +# pypy-c-paper-jit multithread-richards.py 10000 1-4 # report runtime + + + +import matplotlib +import os +import sys +matplotlib.use('gtkagg') + +from matplotlib import rc +#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) +## for Palatino and other serif fonts use: +rc('font',**{'family':'serif','serif':['Palatino']}) +rc('text', 
usetex=True) + +args = None +import matplotlib.pyplot as plt +# import pprint - slow as hell + +# threads +ts = range(1,5) + +interps_styles = { + "pypy-stm-jit": 'r-', + "pypy-jit": 'b--', + "best": "k:" +} + +benchs = { + "raytrace":{ + "pypy-stm-jit":[ + [3.91, 3.87], + [2.53, 2.52], + [2.23], + [2.46, 2.6] + ], + "pypy-jit":[ + [1.6], + [2.17], + [3.33], + [4.16] + ]}, + + "btree":{ + "pypy-stm-jit":[ + [1.68], + [1.3], + [1.39], + [1.66] + ], + "pypy-jit":[ + [1.6], + [3.3], + [5.1], + [5.8] + ]}, + + "skiplist":{ + "pypy-stm-jit":[ + [2.9], + [3.0], + [3.4], + [3.8] + ], + "pypy-jit":[ + [2.14], + [4.5], + [6.2], + [6.58] + ]}, + + "threadworms":{ + "pypy-stm-jit":[ + [4.23], + [3.4], + [3.16], + [3.4, 3.3] + ], + "pypy-jit":[ + [4.14], + [12.5], + [16], + [20] + ]}, + + "mandelbrot":{ + "pypy-stm-jit":[ + [18.5], + [9.9], + [8.4], + [7.2] + ], + "pypy-jit":[ + [13.5], + [14.3], + [14.5], + [14.1] + ]}, + + "richards":{ + "pypy-stm-jit":[ + [63.4], + [33.1], + [24.9,36], + [27,39,63] + ], + "pypy-jit":[ + [30.7], + [31.4], + [33], + [32.0] + ]} +} + + + + +def plot_speedups(plt): + import numpy as np + from collections import OrderedDict + fig = plt.figure() + + legend = OrderedDict() + w, h = 2, 3 + axs = {} + for i, (name, contestants) in enumerate(benchs.items()): + if i >= w: + sharex = axs[i - w] + else: + sharex = None + ax = fig.add_subplot(h, w, i+1, sharex=sharex) + axs[i] = ax + max_y = 0 + best_y = 9999999 + for interp, runs in contestants.items(): + y = [] + yerr = [] + for r in runs: + new_y = np.mean(r) + y.append(new_y) + yerr.append(np.std(r)) + if new_y > max_y: + max_y = new_y + if new_y < best_y: + best_y = new_y + + artist = ax.errorbar(ts, y, yerr=yerr, + fmt=interps_styles[interp]) + if interp not in legend: + legend[interp] = artist + + legend["best"], = ax.plot(ts, [best_y] * len(ts), + interps_styles["best"]) + + if i // w == h-1: + ax.set_xlim(0, 5) + ax.set_xlabel("Threads") + ax.set_ylim(0, max_y * 1.1) + if i % w == 0: + 
ax.set_ylabel("Runtime [s]") + ax.set_title(name) + + return axs[w*(h-1)].legend(tuple(legend.values()), tuple(legend.keys()), + ncol=3, + loc=(0,-0.4)) + + +def main(): + global fig + + print "Draw..." + legend = plot_speedups(plt) + + #axs[0].set_ylim(0, len(x)) + #ax.set_yticks([r+0.5 for r in range(len(logs))]) + #ax.set_yticklabels(range(1, len(logs)+1)) + #axs[0].set_xticks([]) + + # def label_format(x, pos): + # return "%.2f" % (abs((x - left) * 1e-6), ) + # major_formatter = matplotlib.ticker.FuncFormatter(label_format) + # axs[0].xaxis.set_major_formatter(major_formatter) + + #ax.set_title("Memory Usage in Richards") + + plt.draw() + #plt.show() + print "Drawn." + + file_name = "performance.pdf" + plt.savefig(file_name, format='pdf', + bbox_extra_artists=(legend,), + bbox_inches='tight', pad_inches=0) + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description='Plot stm log files') + parser.add_argument('--figure-size', default='7x8', + help='set figure size in inches: format=6x4') + parser.add_argument('--font-size', default='10.0', + help='set font size in pts: 10.0') + parser.add_argument('--png-dpi', default='300', + help='set dpi of png output: 300') + + + args = parser.parse_args() + matplotlib.rcParams.update( + {'figure.figsize': tuple(map(int, args.figure_size.split('x'))), + 'font.size': float(args.font_size), + 'savefig.dpi': int(args.png_dpi), + }) + + + main() From noreply at buildbot.pypy.org Wed May 28 15:17:17 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 28 May 2014 15:17:17 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Use GIL related code from llsupport Message-ID: <20140528131717.59ED41C06C0@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71750:90b32450ce0e Date: 2014-05-28 12:55 +0000 http://bitbucket.org/pypy/pypy/changeset/90b32450ce0e/ Log: Use GIL related code from llsupport diff --git 
a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -727,29 +727,6 @@ operations = newoperations return operations - @staticmethod - def _release_gil_shadowstack(): - before = rffi.aroundstate.before - if before: - before() - - @staticmethod - def _reacquire_gil_shadowstack(): - after = rffi.aroundstate.after - if after: - after() - - _NOARG_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) - - def _build_release_gil(self, gcrootmap): - assert gcrootmap.is_shadow_stack - releasegil_func = llhelper(self._NOARG_FUNC, - self._release_gil_shadowstack) - reacqgil_func = llhelper(self._NOARG_FUNC, - self._reacquire_gil_shadowstack) - self.releasegil_addr = rffi.cast(lltype.Signed, releasegil_func) - self.reacqgil_addr = rffi.cast(lltype.Signed, reacqgil_func) - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): clt = CompiledLoopToken(self.cpu, looptoken.number) clt.allgcrefs = [] From noreply at buildbot.pypy.org Wed May 28 15:17:18 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 28 May 2014 15:17:18 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Make tests run with shared assembler code from llsupport Message-ID: <20140528131718.AB22C1C06C0@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71751:126fc76ad99d Date: 2014-05-28 12:57 +0000 http://bitbucket.org/pypy/pypy/changeset/126fc76ad99d/ Log: Make tests run with shared assembler code from llsupport diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -70,7 +70,7 @@ def high(w): return (w >> 16) & 0x0000FFFF -class AssemblerPPC(OpAssembler): +class AssemblerPPC(OpAssembler, BaseAssembler): ENCODING_AREA = FORCE_INDEX_OFS OFFSET_SPP_TO_GPR_SAVE_AREA = 
(FORCE_INDEX + FLOAT_INT_CONVERSION @@ -325,6 +325,18 @@ arglocs.append(loc) return arglocs[:] + # TODO + def _build_failure_recovery(self, exc, withfloats=False): + pass + + # TODO + def build_frame_realloc_slowpath(self): + pass + + # TODO + def _build_cond_call_slowpath(self, supports_floats, callee_only): + pass + def _build_malloc_slowpath(self): mc = PPCBuilder() frame_size = (len(r.MANAGED_FP_REGS) * WORD @@ -489,7 +501,8 @@ self.write_64_bit_func_descr(rawstart, rawstart+3*WORD) self.stack_check_slowpath = rawstart - def _build_wb_slowpath(self, withcards, withfloats=False): + # TODO: see what need to be done when for_frame is True + def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): descr = self.cpu.gc_ll_descr.write_barrier_descr if descr is None: return @@ -690,7 +703,9 @@ allblocks) self.max_stack_params = 0 self.target_tokens_currently_compiling = {} - return operations + self.frame_depth_to_patch = [] + self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) + return operations # do we really need this return? def setup_once(self): BaseAssembler.setup_once(self) From noreply at buildbot.pypy.org Wed May 28 15:46:25 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 May 2014 15:46:25 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add some no-jit performance graphs Message-ID: <20140528134625.6ABB21C010D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5278:8da07b9a12bc Date: 2014-05-28 15:47 +0200 http://bitbucket.org/pypy/extradoc/changeset/8da07b9a12bc/ Log: add some no-jit performance graphs diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1095,20 +1095,17 @@ we cannot compete well even with CPython, we later show JIT benchmarks where we see the unstable performance but also that we can still scale. 
(with more work we can use our STM system to parallelise jitted code -too)} +too)} See figure \ref{fig:performance-nojit} -more real benchmarks comparing multiple implementations: -\begin{itemize}[noitemsep] -\item pypy -\item pypy-jit -\item pypy-stm -\item pypy-stm-jit -\item cpython -\item jython -\end{itemize} +% TODO: pypy-nostm, Jython? +\begin{figure}[h] + \centering + \includegraphics[width=1\columnwidth]{plots/performance_nojit.pdf} + \caption{Comparing runtime between interpreters without a JIT\label{fig:performance-nojit}} +\end{figure} -% TODO: Jython +% TODO: Jython, compare to cpython? or just jython as common baseline with no-jit? \remi{Some benchmarks (figure \ref{fig:performance-jit} with enabled JIT show that we can be competitive with the other solutions. It also shows that more work is needed in that area to make performance more diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index 99fa8230f1a046efb2fe92225955fc00178fdaa0..5c6937be3742a98c828e64732cc1629cea90bbae GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/performance_nojit.pdf b/talk/dls2014/paper/plots/performance_nojit.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6e5adc65ab8b4cdd8fa8f4e3cbd18757980afb93 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py --- a/talk/dls2014/paper/plots/plot_performance.py +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -29,7 +29,7 @@ # import pprint - slow as hell # threads -ts = range(1,5) + interps_styles = { "pypy-stm-jit": 'r-', @@ -126,13 +126,13 @@ -def plot_speedups(plt): +def plot_speedups(plt, w, h, benchs, interps_styles): import numpy as np from collections import OrderedDict fig = plt.figure() + ts = range(1,5) # threads legend = OrderedDict() - w, h = 2, 3 axs = {} for i, (name, contestants) in enumerate(benchs.items()): if i >= w: @@ -180,7 +180,7 @@ global fig print 
"Draw..." - legend = plot_speedups(plt) + legend = plot_speedups(plt, 2, 3, benchs, interps_styles) #axs[0].set_ylim(0, len(x)) #ax.set_yticks([r+0.5 for r in range(len(logs))]) diff --git a/talk/dls2014/paper/plots/plot_performance_nojit.py b/talk/dls2014/paper/plots/plot_performance_nojit.py new file mode 100755 --- /dev/null +++ b/talk/dls2014/paper/plots/plot_performance_nojit.py @@ -0,0 +1,177 @@ +#!/usr/bin/python + +# benchmarks-repo at 0d81c9b1ec8e + +# for now: avg & stddev of the best + +# pypy-c-paper-nojit bench.py -k5 raytrace/raytrace.py 1-4 256 256 +# pypy-c-paper-nojit bench.py -k5 btree/btree.py 1-4 500000 +# pypy-c-paper-nojit bench.py -k5 skiplist/skiplist.py 1-4 200000 +# pypy-c-paper-nojit bench.py -k5 threadworms/threadworms.py 1-4 500000 +# pypy-c-paper-nojit bench.py -k5 mandelbrot/mandelbrot.py 1-4 64 512 512 +# pypy-c-paper-nojit multithread-richards.py 30 1-4 # report runtime + + +from plot_performance import plot_speedups +import matplotlib +import os +import sys +matplotlib.use('gtkagg') + +from matplotlib import rc +#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) +## for Palatino and other serif fonts use: +rc('font',**{'family':'serif','serif':['Palatino']}) +rc('text', usetex=True) + +args = None +import matplotlib.pyplot as plt +# import pprint - slow as hell + +# threads + + +interps_styles = { + "pypy-stm-nojit": 'r-', + "cpython": 'b--', + "best": "k:" +} + +benchs = { + "raytrace":{ + "pypy-stm-nojit":[ + [8.3], + [4.33], + [3.74], + [3.08] + ], + "cpython":[ + [2.5], + [2.7], + [2.75], + [2.84] + ]}, + + "btree":{ + "pypy-stm-nojit":[ + [8.3], + [4.9], + [3.9,4.3], + [4.4,4.0,4.3] + ], + "cpython":[ + [1.93], + [5.76], + [5.91], + [6.03] + ]}, + + "skiplist":{ + "pypy-stm-nojit":[ + [5.8], + [3.9], + [3.22,4.2,3.5], + [3.5,3.44,4.3] + ], + "cpython":[ + [3.3], + [4.9], + [5.0], + [5.1] + ]}, + + "threadworms":{ + "pypy-stm-nojit":[ + [4.8], + [2.7], + [2.0,2.2,2.1], + [2.1,2.3,2.2] + ], + "cpython":[ + [1.64], 
+ [5], + [5.2], + [5.37] + ]}, + + "mandelbrot":{ + "pypy-stm-nojit":[ + [5.35], + [2.8], + [1.96,2.2], + [2.33,1.97] + ], + "cpython":[ + [1.65], + [2.4], + [2.4], + [2.5] + ]}, + + "richards":{ + "pypy-stm-nojit":[ + [11.2], + [6.1], + [5.4,4.9], + [4.8,4.9,5] + ], + "cpython":[ + [2.5], + [3.87], + [4.02], + [4.13] + ]} +} + + + + +def main(): + global fig + + print "Draw..." + legend = plot_speedups(plt, 2, 3, benchs, interps_styles) + + #axs[0].set_ylim(0, len(x)) + #ax.set_yticks([r+0.5 for r in range(len(logs))]) + #ax.set_yticklabels(range(1, len(logs)+1)) + #axs[0].set_xticks([]) + + # def label_format(x, pos): + # return "%.2f" % (abs((x - left) * 1e-6), ) + # major_formatter = matplotlib.ticker.FuncFormatter(label_format) + # axs[0].xaxis.set_major_formatter(major_formatter) + + #ax.set_title("Memory Usage in Richards") + + plt.draw() + #plt.show() + print "Drawn." + + file_name = "performance_nojit.pdf" + plt.savefig(file_name, format='pdf', + bbox_extra_artists=(legend,), + bbox_inches='tight', pad_inches=0) + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description='Plot stm log files') + parser.add_argument('--figure-size', default='7x8', + help='set figure size in inches: format=6x4') + parser.add_argument('--font-size', default='10.0', + help='set font size in pts: 10.0') + parser.add_argument('--png-dpi', default='300', + help='set dpi of png output: 300') + + + args = parser.parse_args() + matplotlib.rcParams.update( + {'figure.figsize': tuple(map(int, args.figure_size.split('x'))), + 'font.size': float(args.font_size), + 'savefig.dpi': int(args.png_dpi), + }) + + + main() From noreply at buildbot.pypy.org Wed May 28 16:26:33 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 28 May 2014 16:26:33 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Add _push_all_regs_to_jitframe and _pop_all_regs_from_jitframe Message-ID: <20140528142633.D0BD01C06C0@cobra.cs.uni-duesseldorf.de> Author: Ivan 
Sichmann Freitas Branch: ppc-updated-backend Changeset: r71752:3fe6019555b7 Date: 2014-05-28 14:25 +0000 http://bitbucket.org/pypy/pypy/changeset/3fe6019555b7/ Log: Add _push_all_regs_to_jitframe and _pop_all_regs_from_jitframe They still need to be fixed for fpr's diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -199,6 +199,47 @@ recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed] * 3, lltype.Signed)) + # TODO: see with we really need the ignored_regs argument + def _push_all_regs_to_jitframe(self, mc, ignored_regs, withfloats, + callee_only=False): + base_ofs = self.cpu.get_baseofs_of_frame_field() + if callee_only: + # Only push registers used to pass arguments to the callee + regs = r.VOLATILES + else: + regs = r.ALL_REGS + # For now, just push all regs to the jitframe + for i, reg in enumerate(regs): + # XXX should we progress to higher addresses? 
+ mc.store_reg(reg, base_ofs - (i * WORD)) + + if withfloats: + if callee_only: + regs = r.VOLATILES_FLOAT + else: + regs = r.ALL_FLOAT_REGS + for i, reg in enumerate(regs): + pass # TODO find or create the proper store indexed for fpr's + + def _pop_all_regs_from_jitframe(self, mc, ignored_regs, withfloats, + callee_only=False): + base_ofs = self.cpu.get_baseofs_of_frame_field() + if callee_only: + regs = r.VOLATILES + else: + regs = r.ALL_REGS + for i, reg in enumerate(regs): + # XXX should we progress to higher addressess + mc.load_from_addr(reg, base_ofs - (i * WORD)) + + if withfloats: + if callee_only: + regs = r.VOLATILES_FLOAT + else: + regs = r.ALL_FLOAT_REGS + for i, reg in enumerate(regs): + pass # TODO find or create the proper load indexed for fpr's + @rgc.no_collect def decode_registers_and_descr(self, mem_loc, spp, registers, fp_registers): """Decode locations encoded in memory at mem_loc and write the values From noreply at buildbot.pypy.org Wed May 28 17:00:43 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 May 2014 17:00:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add jython to the mix Message-ID: <20140528150043.2FD271D2837@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5279:d928148e310e Date: 2014-05-28 17:01 +0200 http://bitbucket.org/pypy/extradoc/changeset/d928148e310e/ Log: add jython to the mix diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1091,6 +1091,16 @@ \subsection{Performance Benchmarks\label{sec:performance-bench}} +% To isolate factors we look at performance w/o JIT and perf w JIT. 
+% w/o JIT: +% - it scales +% - slower than reference CPython +% - gil scales negatively +% w JIT: +% - it sometimes still scales (more work needed) +% - changed working set size because generally ~50x faster +% - competing w our own JIT on non-stm a challenge +% - gil scales negatively \remi{For performance we first look at no-JIT behaviour of STM. Since we cannot compete well even with CPython, we later show JIT benchmarks where we see the unstable performance but also that we can still scale. diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index 5c6937be3742a98c828e64732cc1629cea90bbae..77bf2dcfa24532e9526c848ceba949109221dd2a GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/performance_nojit.pdf b/talk/dls2014/paper/plots/performance_nojit.pdf index 6e5adc65ab8b4cdd8fa8f4e3cbd18757980afb93..3f0d4de4954fcf4899ec81cc72cfac591c344941 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py --- a/talk/dls2014/paper/plots/plot_performance.py +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -32,9 +32,10 @@ interps_styles = { - "pypy-stm-jit": 'r-', - "pypy-jit": 'b--', - "best": "k:" + "pypy-stm-jit": {'fmt':'r-'}, + "pypy-jit": {'fmt':'b', 'dashes':(1,1)}, + "jython": {'fmt':'m', 'dashes':(2, 5)}, + "best": {'fmt':"k:"} # only fmt allowed } benchs = { @@ -156,12 +157,12 @@ best_y = new_y artist = ax.errorbar(ts, y, yerr=yerr, - fmt=interps_styles[interp]) + **interps_styles[interp]) if interp not in legend: legend[interp] = artist legend["best"], = ax.plot(ts, [best_y] * len(ts), - interps_styles["best"]) + interps_styles["best"]['fmt']) if i // w == h-1: ax.set_xlim(0, 5) @@ -172,7 +173,7 @@ ax.set_title(name) return axs[w*(h-1)].legend(tuple(legend.values()), tuple(legend.keys()), - ncol=3, + ncol=4, loc=(0,-0.4)) diff --git a/talk/dls2014/paper/plots/plot_performance_nojit.py b/talk/dls2014/paper/plots/plot_performance_nojit.py 
--- a/talk/dls2014/paper/plots/plot_performance_nojit.py +++ b/talk/dls2014/paper/plots/plot_performance_nojit.py @@ -28,15 +28,16 @@ import matplotlib.pyplot as plt # import pprint - slow as hell -# threads - interps_styles = { - "pypy-stm-nojit": 'r-', - "cpython": 'b--', - "best": "k:" + "pypy-stm-nojit": {'fmt':'r-'}, + "cpython": {'fmt':'b', 'dashes':(1,1)}, + "jython": {'fmt':'m', 'dashes':(2, 5)}, + "best": {'fmt':"k:"} # only fmt allowed } + + benchs = { "raytrace":{ "pypy-stm-nojit":[ @@ -50,6 +51,12 @@ [2.7], [2.75], [2.84] + ], + "jython":[ + [2.74,2.75], + [2.9,3.1,3.0], + [2.89,3.01,2.95], + [3.0,2.99,2.97] ]}, "btree":{ @@ -64,6 +71,12 @@ [5.76], [5.91], [6.03] + ], + "jython":[ + [1.76,1.84], + [2.60,2.46,2.6], + [2.56,2.6,2.51], + [2.57,2.52,2.48] ]}, "skiplist":{ @@ -78,6 +91,12 @@ [4.9], [5.0], [5.1] + ], + "jython":[ + [1.38,1.33,1.47,1.40], + [1.8,1.77,1.81], + [1.81,1.79,1.88], + [1.99,1.92,1.74,1.84] ]}, "threadworms":{ @@ -92,6 +111,12 @@ [5], [5.2], [5.37] + ], + "jython":[ + [2.73,2.38,2.63,2.4], + [3.0,2.87,3.3,3.1], + [3.35,3.22,3.19], + [3.19,3.37,3.26,3.36] ]}, "mandelbrot":{ @@ -106,6 +131,12 @@ [2.4], [2.4], [2.5] + ], + "jython":[ + [5.56,5.61,5.59,5.55], + [2.84,3,2.8,2.96], + [2.13,2.03,2.04,2.11], + [1.8,1.74,1.8,1.88] ]}, "richards":{ @@ -120,6 +151,12 @@ [3.87], [4.02], [4.13] + ], + "jython":[ + [3.39,3.31,3.7], + [2.32,1.95,2.18], + [1.86,1.66], + [1.49,1.63,1.59] ]} } From noreply at buildbot.pypy.org Wed May 28 17:01:06 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 28 May 2014 17:01:06 +0200 (CEST) Subject: [pypy-commit] benchmarks default: jython compat Message-ID: <20140528150106.A52691D2837@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r262:5bc3db97089a Date: 2014-05-28 17:02 +0200 http://bitbucket.org/pypy/benchmarks/changeset/5bc3db97089a/ Log: jython compat diff --git a/multithread/mandelbrot/mandelbrot.py b/multithread/mandelbrot/mandelbrot.py --- 
a/multithread/mandelbrot/mandelbrot.py +++ b/multithread/mandelbrot/mandelbrot.py @@ -52,14 +52,14 @@ return res -def run(threads=2, stripes=16): +def run(threads=2, stripes=64, w=4096, h=4096): global out_image threads = int(threads) stripes = int(stripes) assert stripes >= threads ar, ai = -2.0, -1.5 br, bi = 1.0, 1.5 - width, height = 4096, 4096 + width, height = int(w), int(h) set_thread_pool(ThreadPool(threads)) step = (bi - ai) / stripes diff --git a/multithread/multithread-richards.py b/multithread/multithread-richards.py --- a/multithread/multithread-richards.py +++ b/multithread/multithread-richards.py @@ -106,13 +106,13 @@ self.task_waiting = False self.task_holding = False return self - + def waitingWithPacket(self): self.packet_pending = True self.task_waiting = True self.task_holding = False return self - + def isPacketPending(self): return self.packet_pending @@ -236,7 +236,7 @@ if t is None: raise Exception("Bad task id %d" % id) return t - + # DeviceTask @@ -310,7 +310,7 @@ else: i.control = i.control/2 ^ 0xd008 return self.release(I_DEVB) - + # WorkTask @@ -372,7 +372,7 @@ self.finished_lock.acquire() def run_and_unlock(self, to_do): - os.write(1, 'running...\n') + print 'running...' 
iterations = 0 self.result = True while 1: @@ -382,7 +382,7 @@ break iterations += 1 self.result = self.run() - os.write(1, 'done, iterations=%d, result=%r\n' % (iterations, self.result)) + print 'done, iterations=%d, result=%r' % (iterations, self.result) self.finished_lock.release() def run(self): @@ -415,7 +415,7 @@ taskWorkArea) DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec(), taskWorkArea) - + schedule(taskWorkArea) if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246: From noreply at buildbot.pypy.org Wed May 28 18:13:42 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 28 May 2014 18:13:42 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't derive SomeNone from SomePBC Message-ID: <20140528161342.3BD0B1C06C0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71753:36c1c430d065 Date: 2014-05-28 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/36c1c430d065/ Log: Don't derive SomeNone from SomePBC diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -453,9 +453,6 @@ attr = s_attr.const descs = list(pbc.descriptions) - if not descs: - return s_ImpossibleValue - first = descs[0] if len(descs) == 1: return first.s_read_attribute(attr) @@ -496,8 +493,6 @@ annotations). 
""" descs = list(pbc.descriptions) - if not descs: - return s_ImpossibleValue first = descs[0] first.mergecallfamilies(*descs[1:]) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -513,7 +513,7 @@ else: return kt.__name__ -class SomeNone(SomePBC): +class SomeNone(SomeObject): can_be_None = True subset_of = None knowntype = type(None) diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -106,12 +106,11 @@ for i, argtype in enumerate(self.argtypes): if isinstance(argtype, (types.FunctionType, types.MethodType)): argtype = argtype(*inputcells) - if isinstance(argtype, lltype.LowLevelType) and\ - argtype is lltype.Void: + if argtype is lltype.Void: # XXX the mapping between Void and annotation # is not quite well defined s_input = inputcells[i] - assert isinstance(s_input, annmodel.SomePBC) + assert isinstance(s_input, (annmodel.SomePBC, annmodel.SomeNone)) assert s_input.is_constant() args_s.append(s_input) elif argtype is None: diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -760,9 +760,16 @@ def bind_callables_under(self, classdef, name): return self + def getattr(self, s_attr): + return s_ImpossibleValue + getattr.can_only_throw = [] + def setattr(self, s_attr, s_value): return None + def call(self, args): + return s_ImpossibleValue + def bool_behavior(self, s): s.const = False diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -56,6 +56,9 @@ assert s_obj.is_constant(), "ambiguous low-level helper specialization" key.append(KeyComp(s_obj.const)) new_args_s.append(s_obj) + elif isinstance(s_obj, annmodel.SomeNone): + key.append(KeyComp(None)) + new_args_s.append(s_obj) else: 
new_args_s.append(annmodel.not_const(s_obj)) try: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -885,7 +885,7 @@ s_value = rtyper.binding(v, default=annmodel.s_None) if not s_value.is_constant(): raise TyperError("non-constant variable of type Void") - if not isinstance(s_value, annmodel.SomePBC): + if not isinstance(s_value, (annmodel.SomePBC, annmodel.SomeNone)): raise TyperError("non-PBC Void argument: %r", (s_value,)) args_s.append(s_value) else: From noreply at buildbot.pypy.org Wed May 28 18:13:43 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 28 May 2014 18:13:43 +0200 (CEST) Subject: [pypy-commit] pypy default: remove unused attributes of SomeNone Message-ID: <20140528161343.8DE941C06C0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71754:042a8399aa2c Date: 2014-05-28 17:13 +0100 http://bitbucket.org/pypy/pypy/changeset/042a8399aa2c/ Log: remove unused attributes of SomeNone diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -514,18 +514,12 @@ return kt.__name__ class SomeNone(SomeObject): - can_be_None = True - subset_of = None knowntype = type(None) const = None def __init__(self): pass - @property - def descriptions(self): - return set() - def is_constant(self): return True From noreply at buildbot.pypy.org Wed May 28 18:32:51 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 28 May 2014 18:32:51 +0200 (CEST) Subject: [pypy-commit] pypy var-in-Some: hg merge default Message-ID: <20140528163251.7735B1D2CA8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: var-in-Some Changeset: r71755:6cdbe0c23284 Date: 2014-05-28 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/6cdbe0c23284/ Log: hg merge default diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -25,13 +25,13 @@ on_rtd = 
os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally - try: - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - except ImportError: - print('sphinx_rtd_theme is not installed') - html_theme = 'default' + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' # otherwise, readthedocs.org uses their theme by default, so no need to specify it diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -11,3 +11,4 @@ .. branch: release-2.3.x +.. branch: unify-call-ops diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -613,6 +613,8 @@ array._charbuf_stop() def getslice(self, start, stop, step, size): + if size == 0: + return '' if step == 1: data = self.array._charbuf_start() try: diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1029,6 +1029,9 @@ raises(TypeError, "a[MyInt(0)]") raises(TypeError, "a[MyInt(0):MyInt(5)]") + def test_fresh_array_buffer_str(self): + assert str(buffer(self.array('i'))) == '' + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -208,6 +208,9 @@ @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): + # XXX XXX XXX THIS IS A VERY MINIMAL IMPLEMENTATION THAT WILL HAPPILY + # DEADLOCK IF CALLED TWICE ON THE 
SAME THREAD, OR CRASH IF CALLED IN A + # NEW THREAD. We should very carefully follow what CPython does instead. if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() @@ -215,6 +218,7 @@ @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): + # XXX XXX XXX We should very carefully follow what CPython does instead. if rffi.aroundstate.before: # Before external call is after running Python rffi.aroundstate.before() diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -748,11 +748,11 @@ self.lockcounter = 0 def lock_held_by_someone_else(self): - return self.lockowner is not None and not self.lock_held() + me = self.space.getexecutioncontext() # used as thread ident + return self.lockowner is not None and self.lockowner is not me - def lock_held(self): - me = self.space.getexecutioncontext() # used as thread ident - return self.lockowner is me + def lock_held_by_anyone(self): + return self.lockowner is not None def acquire_lock(self): # this function runs with the GIL acquired so there is no race diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -165,7 +165,7 @@ def lock_held(space): if space.config.objspace.usemodules.thread: - return space.wrap(importing.getimportlock(space).lock_held()) + return space.wrap(importing.getimportlock(space).lock_held_by_anyone()) else: return space.w_False diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -15,6 +15,7 @@ 'empty': 'ctors.zeros', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', + 'frombuffer': 'ctors.frombuffer', 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', diff --git 
a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop @@ -191,3 +192,62 @@ return _fromstring_bin(space, s, count, length, dtype) else: return _fromstring_text(space, s, count, sep, length, dtype) + + +def _getbuffer(space, w_buffer): + try: + return space.writebuf_w(w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + return space.readbuf_w(w_buffer) + + + at unwrap_spec(count=int, offset=int) +def frombuffer(space, w_buffer, w_dtype=None, count=-1, offset=0): + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero in type") + + try: + buf = _getbuffer(space, w_buffer) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + w_buffer = space.getattr(w_buffer, space.wrap('__buffer__')) + buf = _getbuffer(space, w_buffer) + + ts = buf.getlength() + if offset < 0 or offset > ts: + raise oefmt(space.w_ValueError, + "offset must be non-negative and no greater than " + "buffer length (%d)", ts) + + s = ts - offset + if offset: + buf = SubBuffer(buf, offset, s) + + n = count + itemsize = dtype.elsize + assert itemsize > 0 + if n < 0: + if s % itemsize != 0: + raise oefmt(space.w_ValueError, + "buffer size must be a multiple of element size") + n = s / itemsize + else: + if s < n * itemsize: + raise oefmt(space.w_ValueError, + "buffer is smaller than requested size") + + try: + storage = buf.get_raw_address() + except ValueError: + a = 
W_NDimArray.from_shape(space, [n], dtype=dtype) + loop.fromstring_loop(space, a, dtype, itemsize, buf.as_str()) + return a + else: + writable = not buf.readonly + return W_NDimArray.from_shape_and_storage(space, [n], storage, dtype=dtype, + w_base=w_buffer, writable=writable) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3132,6 +3132,8 @@ class AppTestSupport(BaseNumpyAppTest): + spaceconfig = {'usemodules': ['micronumpy', 'array']} + def setup_class(cls): import struct BaseNumpyAppTest.setup_class.im_func(cls) @@ -3142,6 +3144,44 @@ cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) + def test_frombuffer(self): + import numpy as np + exc = raises(AttributeError, np.frombuffer, None) + assert str(exc.value) == "'NoneType' object has no attribute '__buffer__'" + exc = raises(AttributeError, np.frombuffer, memoryview(self.data)) + assert str(exc.value) == "'memoryview' object has no attribute '__buffer__'" + exc = raises(ValueError, np.frombuffer, self.data, 'S0') + assert str(exc.value) == "itemsize cannot be zero in type" + exc = raises(ValueError, np.frombuffer, self.data, offset=-1) + assert str(exc.value) == "offset must be non-negative and no greater than buffer length (32)" + exc = raises(ValueError, np.frombuffer, self.data, count=100) + assert str(exc.value) == "buffer is smaller than requested size" + for data in [self.data, buffer(self.data)]: + a = np.frombuffer(data) + for i in range(4): + assert a[i] == i + 1 + + import array + data = array.array('c', 'testing') + a = np.frombuffer(data, 'c') + assert a.base is data + a[2] = 'Z' + assert data.tostring() == 'teZting' + + data = buffer(data) + a = np.frombuffer(data, 'c') + assert a.base is data + exc = raises(ValueError, "a[2] = 'Z'") + assert str(exc.value) == "assignment destination 
is read-only" + + class A(object): + __buffer__ = 'abc' + + data = A() + a = np.frombuffer(data, 'c') + #assert a.base is data.__buffer__ + assert a.tostring() == 'abc' + def test_fromstring(self): import sys from numpypy import fromstring, dtype diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -13,7 +13,7 @@ """NOT_RPYTHON""" # because parent __init__ isn't if space.config.translating: del self.__class__.interpleveldefs['pypy_getudir'] - super(Module, self).__init__(space, w_name) + super(Module, self).__init__(space, w_name) self.recursionlimit = 100 self.w_default_encoder = None self.defaultencoding = "ascii" @@ -21,13 +21,13 @@ self.debug = True interpleveldefs = { - '__name__' : '(space.wrap("sys"))', - '__doc__' : '(space.wrap("PyPy sys module"))', + '__name__' : '(space.wrap("sys"))', + '__doc__' : '(space.wrap("PyPy sys module"))', - 'platform' : 'space.wrap(sys.platform)', + 'platform' : 'space.wrap(sys.platform)', 'maxint' : 'space.wrap(sys.maxint)', 'maxsize' : 'space.wrap(sys.maxint)', - 'byteorder' : 'space.wrap(sys.byteorder)', + 'byteorder' : 'space.wrap(sys.byteorder)', 'maxunicode' : 'space.wrap(vm.MAXUNICODE)', 'stdin' : 'state.getio(space).w_stdin', '__stdin__' : 'state.getio(space).w_stdin', @@ -36,35 +36,35 @@ 'stderr' : 'state.getio(space).w_stderr', '__stderr__' : 'state.getio(space).w_stderr', 'pypy_objspaceclass' : 'space.wrap(repr(space))', - #'prefix' : # added by pypy_initial_path() when it + #'prefix' : # added by pypy_initial_path() when it #'exec_prefix' : # succeeds, pointing to trunk or /usr 'path' : 'state.get(space).w_path', - 'modules' : 'state.get(space).w_modules', + 'modules' : 'state.get(space).w_modules', 'argv' : 'state.get(space).w_argv', 'py3kwarning' : 'space.w_False', - 'warnoptions' : 'state.get(space).w_warnoptions', + 'warnoptions' : 'state.get(space).w_warnoptions', 'builtin_module_names' : 'space.w_None', 'pypy_getudir' : 
'state.pypy_getudir', # not translated 'pypy_find_stdlib' : 'initpath.pypy_find_stdlib', 'pypy_find_executable' : 'initpath.pypy_find_executable', 'pypy_resolvedirof' : 'initpath.pypy_resolvedirof', - '_getframe' : 'vm._getframe', - '_current_frames' : 'currentframes._current_frames', - 'setrecursionlimit' : 'vm.setrecursionlimit', - 'getrecursionlimit' : 'vm.getrecursionlimit', - 'setcheckinterval' : 'vm.setcheckinterval', - 'getcheckinterval' : 'vm.getcheckinterval', - 'exc_info' : 'vm.exc_info', - 'exc_clear' : 'vm.exc_clear', + '_getframe' : 'vm._getframe', + '_current_frames' : 'currentframes._current_frames', + 'setrecursionlimit' : 'vm.setrecursionlimit', + 'getrecursionlimit' : 'vm.getrecursionlimit', + 'setcheckinterval' : 'vm.setcheckinterval', + 'getcheckinterval' : 'vm.getcheckinterval', + 'exc_info' : 'vm.exc_info', + 'exc_clear' : 'vm.exc_clear', 'settrace' : 'vm.settrace', 'gettrace' : 'vm.gettrace', 'setprofile' : 'vm.setprofile', 'getprofile' : 'vm.getprofile', 'call_tracing' : 'vm.call_tracing', 'getsizeof' : 'vm.getsizeof', - - 'executable' : 'space.wrap("py.py")', + + 'executable' : 'space.wrap("py.py")', 'api_version' : 'version.get_api_version(space)', 'version_info' : 'version.get_version_info(space)', 'version' : 'version.get_version(space)', @@ -73,14 +73,14 @@ '_mercurial' : 'version.get_repo_info(space)', 'hexversion' : 'version.get_hexversion(space)', - 'displayhook' : 'hook.displayhook', - '__displayhook__' : 'hook.__displayhook__', + 'displayhook' : 'hook.displayhook', + '__displayhook__' : 'hook.__displayhook__', 'meta_path' : 'space.wrap([])', 'path_hooks' : 'space.wrap([])', 'path_importer_cache' : 'space.wrap({})', 'dont_write_bytecode' : 'space.w_False', - - 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', + + 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', 'setdefaultencoding' : 'interp_encoding.setdefaultencoding', 'getfilesystemencoding' : 'interp_encoding.getfilesystemencoding', @@ -119,21 +119,21 @@ 
w_modules = self.get('modules') try: return space.getitem(w_modules, space.wrap(name)) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - return None + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + return None - def setmodule(self, w_module): + def setmodule(self, w_module): space = self.space w_name = self.space.getattr(w_module, space.wrap('__name__')) w_modules = self.get('modules') self.space.setitem(w_modules, w_name, w_module) def getdictvalue(self, space, attr): - """ specialize access to dynamic exc_* attributes. """ - value = MixedModule.getdictvalue(self, space, attr) - if value is not None: + """ specialize access to dynamic exc_* attributes. """ + value = MixedModule.getdictvalue(self, space, attr) + if value is not None: return value if attr == 'exc_type': operror = space.getexecutioncontext().sys_exc_info() @@ -153,7 +153,7 @@ return space.w_None else: return space.wrap(operror.get_traceback()) - return None + return None def get_w_default_encoder(self): if self.w_default_encoder is not None: diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -62,6 +62,28 @@ self.waitfor(lambda: done) assert done + def test_lock_held_by_another_thread(self): + import thread, imp + lock_held = thread.allocate_lock() + test_complete = thread.allocate_lock() + lock_released = thread.allocate_lock() + def other_thread(): + imp.acquire_lock() # 3 + assert imp.lock_held() + lock_held.release() # 4 + test_complete.acquire() # 7 + imp.release_lock() # 8 + lock_released.release() # 9 + lock_held.acquire() + test_complete.acquire() + lock_released.acquire() + # + thread.start_new_thread(other_thread, ()) # 1 + lock_held.acquire() # 2 + assert imp.lock_held() # 5 + test_complete.release() # 6 + lock_released.acquire() # 10 + class TestImportLock: def 
test_lock(self, space, monkeypatch): from pypy.module.imp.importing import getimportlock, importhook diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -5,7 +5,7 @@ from rpython.rlib.buffer import Buffer, SubBuffer from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty @@ -61,15 +61,6 @@ def getlength(self): return self.buf.getlength() - def getslice(self, start, stop): - if start < 0: - start = 0 - size = stop - start - if size < 0: - size = 0 - buf = SubBuffer(self.buf, start, size) - return W_MemoryView(buf) - def descr_tobytes(self, space): return space.wrap(self.as_str()) @@ -81,25 +72,25 @@ return space.newlist(result) def descr_getitem(self, space, w_index): - start, stop, step = space.decode_index(w_index, self.getlength()) + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise oefmt(space.w_NotImplementedError, "") if step == 0: # index only return space.wrap(self.buf.getitem(start)) - res = self.getslice(start, stop) - return space.wrap(res) + else: + buf = SubBuffer(self.buf, start, size) + return W_MemoryView(buf) def descr_setitem(self, space, w_index, w_obj): if self.buf.readonly: - raise OperationError(space.w_TypeError, space.wrap( - "cannot modify read-only memory")) - start, stop, step, size = space.decode_index4(w_index, self.buf.getlength()) + raise oefmt(space.w_TypeError, "cannot modify read-only memory") + start, stop, step, size = space.decode_index4(w_index, self.getlength()) if step not in (0, 1): - raise OperationError(space.w_NotImplementedError, space.wrap("")) + raise 
oefmt(space.w_NotImplementedError, "") value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) if value.getlength() != size: - raise OperationError(space.w_ValueError, space.wrap( - "cannot modify size of memoryview object")) + raise oefmt(space.w_ValueError, + "cannot modify size of memoryview object") if step == 0: # index only self.buf.setitem(start, value.getitem(0)) elif step == 1: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -2,14 +2,14 @@ Binary operations between SomeValues. """ -import py +import operator from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, - SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, - SomeBuiltinMethod, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeSingleFloat, + SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, + SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -734,57 +734,51 @@ # mixing Nones with other objects -def _make_none_union(classname, constructor_args='', glob=None): - if glob is None: - glob = globals() - loc = locals() - source = py.code.Source(""" - class __extend__(pairtype(%(classname)s, SomePBC)): - def union((obj, pbc)): - if pbc.isNone(): - return %(classname)s(%(constructor_args)s) - else: - raise UnionError(pbc, obj) +class __extend__(pairtype(SomeObject, SomeNone)): + def union((obj, none)): + return obj.noneify() - class __extend__(pairtype(SomePBC, %(classname)s)): - def union((pbc, obj)): - if pbc.isNone(): - return %(classname)s(%(constructor_args)s) - else: - raise UnionError(pbc, obj) - """ 
% loc) - exec source.compile() in glob +class __extend__(pairtype(SomeNone, SomeObject)): + def union((none, obj)): + return obj.noneify() -_make_none_union('SomeInstance', 'classdef=obj.classdef, can_be_None=True') -_make_none_union('SomeString', 'no_nul=obj.no_nul, can_be_None=True') -_make_none_union('SomeUnicodeString', 'can_be_None=True') -_make_none_union('SomeList', 'obj.listdef') -_make_none_union('SomeOrderedDict', 'obj.dictdef') -_make_none_union('SomeDict', 'obj.dictdef') -_make_none_union('SomeWeakRef', 'obj.classdef') +class __extend__(pairtype(SomeImpossibleValue, SomeNone)): + def union((imp1, none)): + return s_None -# getitem on SomePBCs, in particular None fails +class __extend__(pairtype(SomeNone, SomeImpossibleValue)): + def union((none, imp2)): + return s_None + class __extend__(pairtype(SomePBC, SomeObject)): def getitem((pbc, o)): - if not pbc.isNone(): - raise AnnotatorError("getitem on %r" % pbc) + raise AnnotatorError("getitem on %r" % pbc) + + def setitem((pbc, o), s_value): + raise AnnotatorError("setitem on %r" % pbc) + +class __extend__(pairtype(SomeNone, SomeObject)): + def getitem((none, o)): return s_ImpossibleValue - def setitem((pbc, o), s_value): - if not pbc.isNone(): - raise AnnotatorError("setitem on %r" % pbc) + def setitem((none, o), s_value): + return None class __extend__(pairtype(SomePBC, SomeString)): def add((pbc, o)): - if not pbc.isNone(): - raise AnnotatorError('add on %r' % pbc) + raise AnnotatorError('add on %r' % pbc) + +class __extend__(pairtype(SomeNone, SomeString)): + def add((none, o)): return s_ImpossibleValue class __extend__(pairtype(SomeString, SomePBC)): def add((o, pbc)): - if not pbc.isNone(): - raise AnnotatorError('add on %r' % pbc) + raise AnnotatorError('add on %r' % pbc) + +class __extend__(pairtype(SomeString, SomeNone)): + def add((o, none)): return s_ImpossibleValue #_________________________________________ diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- 
a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -18,7 +18,7 @@ from rpython.annotator.dictdef import DictDef from rpython.annotator import description from rpython.annotator.signature import annotationoftype -from rpython.annotator.argument import simple_args, complex_args +from rpython.annotator.argument import simple_args from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -103,8 +103,9 @@ self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): - self.consider_call_site_for_pbc(pbc, 'simple_call', - args_s, s_ImpossibleValue, None) + args = simple_args(args_s) + self.consider_call_site_for_pbc(pbc, args, + s_ImpossibleValue, None) self.emulated_pbc_calls = {} finally: self.leave() @@ -152,16 +153,16 @@ args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s if isinstance(s_callable, SomePBC): s_result = binding(call_op.result, s_ImpossibleValue) - self.consider_call_site_for_pbc(s_callable, call_op.opname, args_s, + args = call_op.build_args(args_s) + self.consider_call_site_for_pbc(s_callable, args, s_result, call_op) - def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result, + def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) if not descs: return family = descs[0].getcallfamily() - args = self.build_args(opname, args_s) s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) @@ -452,9 +453,6 @@ attr = s_attr.const descs = list(pbc.descriptions) - if not descs: - return s_ImpossibleValue - first = descs[0] if len(descs) == 1: return first.s_read_attribute(attr) @@ -495,8 +493,6 @@ annotations). 
""" descs = list(pbc.descriptions) - if not descs: - return s_ImpossibleValue first = descs[0] first.mergecallfamilies(*descs[1:]) @@ -562,12 +558,6 @@ assert self.annotator.binding(op.args[pos]) == s_type return op - def build_args(self, op, args_s): - if op == "simple_call": - return simple_args(args_s) - elif op == "call_args": - return complex_args(args_s) - def ondegenerated(self, what, s_value, where=None, called_from_graph=None): self.annotator.ondegenerated(what, s_value, where=where, called_from_graph=called_from_graph) diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -1,8 +1,9 @@ """ Type inference for user-defined classes. """ -from rpython.annotator.model import SomePBC, s_ImpossibleValue, unionof -from rpython.annotator.model import SomeInteger, SomeTuple, SomeString, AnnotatorError +from rpython.annotator.model import ( + SomePBC, SomeNone, s_ImpossibleValue, unionof, s_None, SomeInteger, + SomeTuple, SomeString, AnnotatorError) from rpython.annotator import description @@ -103,10 +104,10 @@ self.bookkeeper.annotator.reflowfromposition(position) # check for method demotion and after-the-fact method additions - if isinstance(s_newvalue, SomePBC): + if (isinstance(s_newvalue, SomePBC) and + not isinstance(s_newvalue, SomeNone)): attr = self.name - if (not s_newvalue.isNone() and - s_newvalue.getKind() == description.MethodDesc): + if s_newvalue.getKind() == description.MethodDesc: # is method if homedef.classdesc.read_attribute(attr, None) is None: if not homedef.check_missing_attribute_update(attr): @@ -351,8 +352,10 @@ if uplookup is not None: d.append(updesc.bind_self(self, flags)) - if d or pbc.can_be_None: + if d: return SomePBC(d, can_be_None=pbc.can_be_None) + elif pbc.can_be_None: + return s_None else: return s_ImpossibleValue diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ 
b/rpython/annotator/model.py @@ -122,6 +122,9 @@ def can_be_none(self): return True + def noneify(self): + raise UnionError(self, s_None) + def nonnoneify(self): return self @@ -258,11 +261,17 @@ "Stands for an object which is known to be a string." knowntype = str + def noneify(self): + return SomeString(can_be_None=True, no_nul=self.no_nul) + class SomeUnicodeString(SomeStringOrUnicode): "Stands for an object which is known to be an unicode string" knowntype = unicode + def noneify(self): + return SomeUnicodeString(can_be_None=True, no_nul=self.no_nul) + class SomeByteArray(SomeStringOrUnicode): immutable = False @@ -313,6 +322,9 @@ def can_be_none(self): return True + def noneify(self): + return SomeList(self.listdef) + class SomeTuple(SomeObject): "Stands for a tuple of known length." @@ -358,6 +370,9 @@ else: return '{...%s...}' % (len(const),) + def noneify(self): + return type(self)(self.dictdef) + class SomeOrderedDict(SomeDict): try: from collections import OrderedDict as knowntype @@ -417,6 +432,9 @@ def nonnoneify(self): return SomeInstance(self.classdef, can_be_None=False) + def noneify(self): + return SomeInstance(self.classdef, can_be_None=True) + class SomePBC(SomeObject): """Stands for a global user instance, built prior to the analysis, @@ -424,36 +442,32 @@ immutable = True def __init__(self, descriptions, can_be_None=False, subset_of=None): + assert descriptions # descriptions is a set of Desc instances descriptions = set(descriptions) self.descriptions = descriptions self.can_be_None = can_be_None self.subset_of = subset_of self.simplify() - if self.isNone(): - self.knowntype = type(None) - self.const = None - else: - knowntype = reduce(commonbase, - [x.knowntype for x in descriptions]) - if knowntype == type(Exception): - knowntype = type - if knowntype != object: - self.knowntype = knowntype - if len(descriptions) == 1 and not can_be_None: - # hack for the convenience of direct callers to SomePBC(): - # only if there is a single object in 
descriptions - desc, = descriptions - if desc.pyobj is not None: - self.const = desc.pyobj - elif len(descriptions) > 1: - from rpython.annotator.description import ClassDesc - if self.getKind() is ClassDesc: - # a PBC of several classes: enforce them all to be - # built, without support for specialization. See - # rpython/test/test_rpbc.test_pbc_of_classes_not_all_used - for desc in descriptions: - desc.getuniqueclassdef() + knowntype = reduce(commonbase, [x.knowntype for x in descriptions]) + if knowntype == type(Exception): + knowntype = type + if knowntype != object: + self.knowntype = knowntype + if len(descriptions) == 1 and not can_be_None: + # hack for the convenience of direct callers to SomePBC(): + # only if there is a single object in descriptions + desc, = descriptions + if desc.pyobj is not None: + self.const = desc.pyobj + elif len(descriptions) > 1: + from rpython.annotator.description import ClassDesc + if self.getKind() is ClassDesc: + # a PBC of several classes: enforce them all to be + # built, without support for specialization. 
See + # rpython/test/test_rpbc.test_pbc_of_classes_not_all_used + for desc in descriptions: + desc.getuniqueclassdef() def any_description(self): return iter(self.descriptions).next() @@ -466,32 +480,26 @@ kinds.add(x.__class__) if len(kinds) > 1: raise AnnotatorError("mixing several kinds of PBCs: %r" % kinds) - if not kinds: - raise ValueError("no 'kind' on the 'None' PBC") return kinds.pop() def simplify(self): - if self.descriptions: - # We check that the set only contains a single kind of Desc instance - kind = self.getKind() - # then we remove unnecessary entries in self.descriptions: - # some MethodDescs can be 'shadowed' by others - if len(self.descriptions) > 1: - kind.simplify_desc_set(self.descriptions) - else: - assert self.can_be_None, "use s_ImpossibleValue" - - def isNone(self): - return len(self.descriptions) == 0 + # We check that the set only contains a single kind of Desc instance + kind = self.getKind() + # then we remove unnecessary entries in self.descriptions: + # some MethodDescs can be 'shadowed' by others + if len(self.descriptions) > 1: + kind.simplify_desc_set(self.descriptions) def can_be_none(self): return self.can_be_None def nonnoneify(self): - if self.isNone(): - return s_ImpossibleValue - else: - return SomePBC(self.descriptions, can_be_None=False) + return SomePBC(self.descriptions, can_be_None=False, + subset_of=self.subset_of) + + def noneify(self): + return SomePBC(self.descriptions, can_be_None=True, + subset_of=self.subset_of) def fmt_descriptions(self, pbis): if hasattr(self, 'const'): @@ -505,6 +513,23 @@ else: return kt.__name__ +class SomeNone(SomeObject): + knowntype = type(None) + const = None + + def __init__(self): + pass + + def is_constant(self): + return True + + def is_immutable_constant(self): + return True + + def nonnoneify(self): + return s_ImpossibleValue + + class SomeConstantType(SomePBC): can_be_None = False subset_of = None @@ -557,7 +582,7 @@ return False -s_None = SomePBC([], can_be_None=True) +s_None = 
SomeNone() s_Bool = SomeBool() s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() @@ -576,6 +601,9 @@ # 'classdef' is None for known-to-be-dead weakrefs. self.classdef = classdef + def noneify(self): + return SomeWeakRef(self.classdef) + # ____________________________________________________________ diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -106,12 +106,11 @@ for i, argtype in enumerate(self.argtypes): if isinstance(argtype, (types.FunctionType, types.MethodType)): argtype = argtype(*inputcells) - if isinstance(argtype, lltype.LowLevelType) and\ - argtype is lltype.Void: + if argtype is lltype.Void: # XXX the mapping between Void and annotation # is not quite well defined s_input = inputcells[i] - assert isinstance(s_input, annmodel.SomePBC) + assert isinstance(s_input, (annmodel.SomePBC, annmodel.SomeNone)) assert s_input.is_constant() args_s.append(s_input) elif argtype is None: diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -8,7 +8,7 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, - SomeFloat, SomeIterator, SomePBC, SomeType, s_ImpossibleValue, + SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue @@ -735,8 +735,7 @@ getattr.can_only_throw = [] def setattr(self, s_attr, s_value): - if not self.isNone(): - raise AnnotatorError("Cannot modify attribute of a pre-built constant") + raise AnnotatorError("Cannot modify attribute of a pre-built constant") def call(self, args): 
bookkeeper = getbookkeeper() @@ -747,19 +746,33 @@ return SomePBC(d, can_be_None=self.can_be_None) def bool_behavior(self, s): - if self.isNone(): - s.const = False - elif not self.can_be_None: + if not self.can_be_None: s.const = True def len(self): - if self.isNone(): - # this None could later be generalized into an empty list, - # whose length is the constant 0; so let's tentatively answer 0. - return immutablevalue(0) - else: - # This should probably never happen - raise AnnotatorError("Cannot call len on a pbc") + raise AnnotatorError("Cannot call len on a pbc") + +class __extend__(SomeNone): + def bind_callables_under(self, classdef, name): + return self + + def getattr(self, s_attr): + return s_ImpossibleValue + getattr.can_only_throw = [] + + def setattr(self, s_attr, s_value): + return None + + def call(self, args): + return s_ImpossibleValue + + def bool_behavior(self, s): + s.const = False + + def len(self): + # XXX: this None could later be generalized into an empty list, + # whose length is the constant 0; so let's tentatively answer 0. 
+ return immutablevalue(0) #_________________________________________ # weakrefs diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -15,6 +15,7 @@ from rpython.flowspace.specialcase import register_flow_sc from rpython.annotator.model import ( SomeTuple, AnnotatorError, read_can_only_throw) +from rpython.annotator.argument import ArgumentsForTranslation from rpython.flowspace.specialcase import SPECIAL_CASES @@ -576,6 +577,9 @@ return sc(ctx, *args_w) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation(list(args_s)) + class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' @@ -594,6 +598,10 @@ "should not call %r with keyword arguments" % (fn,)) return ctx.do_op(self) + def build_args(self, args_s): + return ArgumentsForTranslation.fromshape(args_s[0].const, + list(args_s[1:])) + # Other functions that get directly translated to SpaceOperators func2op[type] = op.type diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -63,7 +63,10 @@ 'AMD64': MODEL_X86, # win64 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, - }[mach] + }.get(mach) + + if result is None: + raise ProcessorAutodetectError, "unknown machine name %s" % mach # if result.startswith('x86'): if sys.maxint == 2**63-1: @@ -78,7 +81,9 @@ # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float - assert detect_float(), 'the JIT-compiler requires a vfp unit' + if not detect_float(): + raise ProcessorAutodetectError( + 'the JIT-compiler requires a vfp unit') # return result diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -9,6 +9,7 @@ QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, 
CallInfoCollection) from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.typesystem import getfunctionptr from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer from rpython.translator.backendopt.graphanalyze import DependencyTracker @@ -168,7 +169,7 @@ because it is not needed there; it is only used by the blackhole interp to really do the call corresponding to 'inline_call' ops. """ - fnptr = self.rtyper.type_system.getcallable(graph) + fnptr = getfunctionptr(graph) FUNC = lltype.typeOf(fnptr).TO assert self.rtyper.type_system.name == "lltypesystem" fnaddr = llmemory.cast_ptr_to_adr(fnptr) diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -1,10 +1,12 @@ import py + from rpython.flowspace.model import SpaceOperation, Constant, Variable from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.unsimplify import varoftype from rpython.rlib import jit +from rpython.jit.codewriter import support, call from rpython.jit.codewriter.call import CallControl -from rpython.jit.codewriter import support + class FakePolicy: def look_inside_graph(self, graph): @@ -151,18 +153,19 @@ # ____________________________________________________________ -def test_get_jitcode(): +def test_get_jitcode(monkeypatch): from rpython.jit.codewriter.test.test_flatten import FakeCPU class FakeRTyper: class annotator: translator = None class type_system: name = 'lltypesystem' - @staticmethod - def getcallable(graph): - F = lltype.FuncType([], lltype.Signed) - return lltype.functionptr(F, 'bar') - # + + def getfunctionptr(graph): + F = lltype.FuncType([], lltype.Signed) + return lltype.functionptr(F, 'bar') + + monkeypatch.setattr(call, 'getfunctionptr', getfunctionptr) cc = CallControl(FakeCPU(FakeRTyper())) class 
somegraph: name = "foo" diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -198,11 +198,11 @@ opnum = lastop.getopnum() args = lastop.getarglist() result = lastop.result - # If the INT_xxx_OVF was replaced with INT_xxx, then we can kill - # the GUARD_NO_OVERFLOW. - if (opnum == rop.INT_ADD or - opnum == rop.INT_SUB or - opnum == rop.INT_MUL): + # If the INT_xxx_OVF was replaced with INT_xxx or removed + # completely, then we can kill the GUARD_NO_OVERFLOW. + if (opnum != rop.INT_ADD_OVF and + opnum != rop.INT_SUB_OVF and + opnum != rop.INT_MUL_OVF): return # Else, synthesize the non overflowing op for optimize_default to # reuse, as well as the reverse op @@ -248,6 +248,9 @@ def optimize_INT_SUB_OVF(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) + if v1 is v2: + self.make_constant_int(op.result, 0) + return resbound = v1.intbound.sub_bound(v2.intbound) if resbound.bounded(): op = op.copy_and_change(rop.INT_SUB) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3339,6 +3339,21 @@ """ self.optimize_loop(ops, expected) + def test_fold_int_sub_ovf_xx(self): + ops = """ + [i0] + i1 = int_sub_ovf(i0, i0) + guard_no_overflow() [] + escape(i1) + jump(i1) + """ + expected = """ + [] + escape(0) + jump() + """ + self.optimize_loop(ops, expected) + def test_fold_partially_constant_shift(self): ops = """ [i0] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -167,12 +167,11 @@ def make_result_of_lastop(self, resultbox): got_type = resultbox.type - # 
XXX disabled for now, conflicts with str_guard_value - #if not we_are_translated(): - # typeof = {'i': history.INT, - # 'r': history.REF, - # 'f': history.FLOAT} - # assert typeof[self.jitcode._resulttypes[self.pc]] == got_type + if not we_are_translated(): + typeof = {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT} + assert typeof[self.jitcode._resulttypes[self.pc]] == got_type target_index = ord(self.bytecode[self.pc-1]) if got_type == history.INT: self.registers_i[target_index] = resultbox @@ -1321,14 +1320,14 @@ self.metainterp.clear_exception() resbox = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) - if resbox is not None: - self.make_result_of_lastop(resbox) - # ^^^ this is done before handle_possible_exception() because we - # need the box to show up in get_list_of_active_boxes() if pure and self.metainterp.last_exc_value_box is None and resbox: resbox = self.metainterp.record_result_of_call_pure(resbox) exc = exc and not isinstance(resbox, Const) if exc: + if resbox is not None: + self.make_result_of_lastop(resbox) + # ^^^ this is done before handle_possible_exception() because we + # need the box to show up in get_list_of_active_boxes() self.metainterp.handle_possible_exception() else: self.metainterp.assert_no_exception() diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -22,6 +22,11 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof INVALID_SOCKET = _c.INVALID_SOCKET +from rpython.rlib import jit +# Usage of @jit.dont_look_inside in this file is possibly temporary +# and only because some lltypes declared in _rsocket_rffi choke the +# JIT's codewriter right now (notably, FixedSizeArray). 
+ def mallocbuf(buffersize): return lltype.malloc(rffi.CCHARP.TO, buffersize, flavor='raw') @@ -592,6 +597,7 @@ addrlen_p[0] = rffi.cast(_c.socklen_t, maxlen) return addr, addr.addr_p, addrlen_p + @jit.dont_look_inside def accept(self): """Wait for an incoming connection. Return (new socket fd, client address).""" @@ -724,6 +730,7 @@ return make_socket(fd, self.family, self.type, self.proto, SocketClass=SocketClass) + @jit.dont_look_inside def getpeername(self): """Return the address of the remote endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -738,6 +745,7 @@ address.addrlen = rffi.cast(lltype.Signed, addrlen) return address + @jit.dont_look_inside def getsockname(self): """Return the address of the local endpoint.""" address, addr_p, addrlen_p = self._addrbuf() @@ -752,6 +760,7 @@ address.addrlen = rffi.cast(lltype.Signed, addrlen) return address + @jit.dont_look_inside def getsockopt(self, level, option, maxlen): buf = mallocbuf(maxlen) try: @@ -771,6 +780,7 @@ lltype.free(buf, flavor='raw') return result + @jit.dont_look_inside def getsockopt_int(self, level, option): flag_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') try: @@ -828,6 +838,7 @@ rwbuffer.setslice(0, buf) return len(buf) + @jit.dont_look_inside def recvfrom(self, buffersize, flags=0): """Like recv(buffersize, flags) but also return the sender's address.""" diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -446,6 +446,9 @@ def rtyper_makekey(self): return self.__class__, + def noneify(self): + return self + class SomeUnicodeBuilder(SomeObject): def method_append(self, s_str): @@ -483,6 +486,9 @@ def rtyper_makekey(self): return self.__class__, + def noneify(self): + return self + class BaseEntry(object): def compute_result_annotation(self, s_init_size=None): @@ -506,29 +512,6 @@ use_unicode = True -class __extend__(pairtype(SomeStringBuilder, SomePBC)): - def union((sb, p)): - assert p.const is None - return 
SomeStringBuilder() - - -class __extend__(pairtype(SomePBC, SomeStringBuilder)): - def union((p, sb)): - assert p.const is None - return SomeStringBuilder() - - -class __extend__(pairtype(SomeUnicodeBuilder, SomePBC)): - def union((sb, p)): - assert p.const is None - return SomeUnicodeBuilder() - - -class __extend__(pairtype(SomePBC, SomeUnicodeBuilder)): - def union((p, sb)): - assert p.const is None - return SomeUnicodeBuilder() - #___________________________________________________________________ # Support functions for SomeString.no_nul diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -56,6 +56,9 @@ assert s_obj.is_constant(), "ambiguous low-level helper specialization" key.append(KeyComp(s_obj.const)) new_args_s.append(s_obj) + elif isinstance(s_obj, annmodel.SomeNone): + key.append(KeyComp(None)) + new_args_s.append(s_obj) else: new_args_s.append(annmodel.not_const(s_obj)) try: diff --git a/rpython/rtyper/callparse.py b/rpython/rtyper/callparse.py --- a/rpython/rtyper/callparse.py +++ b/rpython/rtyper/callparse.py @@ -31,7 +31,7 @@ getrinputs(rtyper, graph), getrresult(rtyper, graph)) -def callparse(rtyper, graph, hop, opname, r_self=None): +def callparse(rtyper, graph, hop, r_self=None): """Parse the arguments of 'hop' when calling the given 'graph'. 
""" rinputs = getrinputs(rtyper, graph) @@ -43,6 +43,7 @@ else: start = 0 rinputs[0] = r_self + opname = hop.spaceop.opname if opname == "simple_call": arguments = ArgumentsForRtype(args_h(start)) elif opname == "call_args": diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -1,6 +1,5 @@ from rpython.annotator import model as annmodel from rpython.tool.pairtype import pairtype -from rpython.annotator.binaryop import _make_none_union, SomePBC # SomePBC needed by _make_none_union from rpython.annotator.bookkeeper import getbookkeeper from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.annlowlevel import cachedtype @@ -216,6 +215,9 @@ def can_be_none(self): return self.controller.can_be_None + def noneify(self): + return SomeControlledInstance(self.s_real_obj, self.controller) + def rtyper_makerepr(self, rtyper): from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) @@ -224,7 +226,6 @@ real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller -_make_none_union("SomeControlledInstance", "obj.s_real_obj, obj.controller", globals()) class __extend__(SomeControlledInstance): diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -643,7 +643,7 @@ return frame.eval() def op_direct_call(self, f, *args): - FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f)) + FTYPE = lltype.typeOf(f).TO return self.perform_call(f, FTYPE.ARGS, args) def op_indirect_call(self, f, *args): diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -13,6 +13,7 @@ AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, 
SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable) +from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pairtype @@ -182,10 +183,10 @@ return self.convert_desc(funcdesc) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) def dispatcher(self, shape, index, argtypes, resulttype): key = shape, index, tuple(argtypes), resulttype @@ -218,14 +219,14 @@ links[-1].llexitcase = chr(i) startblock.closeblock(*links) self.rtyper.annotator.translator.graphs.append(graph) - ll_ret = self.rtyper.type_system.getcallable(graph) + ll_ret = getfunctionptr(graph) #FTYPE = FuncType c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) return c_ret - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -233,7 +234,7 @@ row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness vlist = [hop.inputarg(self, arg=0)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() v_dispatcher = self.dispatcher(shape, index, [v.concretetype for v in vlist[1:]], rresult.lowleveltype) @@ -352,11 +353,6 @@ v_func = r_class.getclsfield(v_cls, self.methodname, hop.llops) hop2 = self.add_instance_arg_to_hop(hop, call_args) - opname = 'simple_call' - if call_args: - opname = 'call_args' - hop2.forced_opname = opname - hop2.v_s_insertfirstarg(v_func, s_func) # insert 'function' if 
type(hop2.args_r[0]) is SmallFunctionSetPBCRepr and type(r_func) is FunctionsPBCRepr: diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -10,6 +10,7 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.tool.pairtype import pairtype from rpython.rtyper.rmodel import Repr from rpython.rtyper.rint import IntegerRepr @@ -36,8 +37,10 @@ rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args hop2.args_s = [self.s_tuple, s_index] hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() @@ -87,8 +90,10 @@ rtyper = self.rtyper s_index = rtyper.annotator.bookkeeper.immutablevalue(index) hop2 = hop.copy() - hop2.forced_opname = 'getitem' - hop2.args_v = [hop2.args_v[0], Constant(index)] + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args hop2.args_s = [self.s_tuple, s_index] hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] return hop2.dispatch() diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -154,11 +154,12 @@ # special-casing for methods: # if s_value is SomePBC([MethodDescs...]) # return a PBC representing the underlying functions - if isinstance(s_value, annmodel.SomePBC): - if not s_value.isNone() and s_value.getKind() == description.MethodDesc: - s_value = self.classdef.lookup_filter(s_value) - funcdescs = [mdesc.funcdesc for 
mdesc in s_value.descriptions] - return annmodel.SomePBC(funcdescs) + if (isinstance(s_value, annmodel.SomePBC) and + not isinstance(s_value, annmodel.SomeNone) and + s_value.getKind() == description.MethodDesc): + s_value = self.classdef.lookup_filter(s_value) + funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions] + return annmodel.SomePBC(funcdescs) return None # not a method def get_ll_eq_function(self): diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -1,4 +1,5 @@ from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.rtyper.error import TyperError from rpython.rtyper.rmodel import Repr from rpython.tool.pairtype import pairtype @@ -71,5 +72,7 @@ s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj hop2.s_result, hop2.r_result = s_new, r_new hop2.v_s_insertfirstarg(c_meth, s_meth) - hop2.forced_opname = 'simple_call' + spaceop = op.simple_call(*hop2.args_v) + spaceop.result = hop2.spaceop.result + hop2.spaceop = spaceop return hop2.dispatch() diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -53,7 +53,7 @@ self._initialized = setupstate.INPROGRESS try: self._setup_repr() - except TyperError, e: + except TyperError: self._initialized = setupstate.BROKEN raise else: diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -25,8 +25,6 @@ from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, MethodOfFrozenPBCRepr) - if self.isNone(): - return none_frozen_pbc_repr kind = self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -63,6 +61,13 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t +class 
__extend__(annmodel.SomeNone): + def rtyper_makerepr(self, rtyper): + return none_frozen_pbc_repr + + def rtyper_makekey(self): + return self.__class__, + # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -300,14 +305,14 @@ return inputconst(typeOf(llfn), llfn) def rtype_simple_call(self, hop): - return self.call('simple_call', hop) + return self.call(hop) def rtype_call_args(self, hop): - return self.call('call_args', hop) + return self.call(hop) - def call(self, opname, hop): + def call(self, hop): bk = self.rtyper.annotator.bookkeeper - args = bk.build_args(opname, hop.args_s[1:]) + args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) vfcs = description.FunctionDesc.variant_for_call_site @@ -317,7 +322,7 @@ vfn = hop.inputarg(self, arg=0) vlist = [self.convert_to_concrete_llfn(vfn, shape, index, hop.llops)] - vlist += callparse.callparse(self.rtyper, anygraph, hop, opname) + vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) hop.exception_is_here() if isinstance(vlist[0], Constant): @@ -808,9 +813,6 @@ def __init__(self, rtyper, s_pbc): self.rtyper = rtyper self.s_pbc = s_pbc - if s_pbc.isNone(): - raise TyperError("unsupported: variable of type " - "bound-method-object or None") mdescs = list(s_pbc.descriptions) methodname = mdescs[0].name classdef = mdescs[0].selfclassdef diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -26,7 +26,7 @@ Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError -from rpython.rtyper.typesystem import LowLevelTypeSystem +from rpython.rtyper.typesystem import LowLevelTypeSystem, getfunctionptr from rpython.rtyper.normalizecalls import 
perform_normalizations from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block @@ -38,7 +38,7 @@ def __init__(self, annotator): self.annotator = annotator self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - self.type_system = LowLevelTypeSystem.instance + self.type_system = LowLevelTypeSystem() self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} @@ -600,7 +600,7 @@ def getconcretetype(v): return self.bindingrepr(v).lowleveltype - return self.type_system.getcallable(graph, getconcretetype) + return getfunctionptr(graph, getconcretetype) def annotate_helper(self, ll_function, argtypes): """Annotate the given low-level helper function and return its graph @@ -650,8 +650,6 @@ class HighLevelOp(object): - forced_opname = None - def __init__(self, rtyper, spaceop, exceptionlinks, llops): self.rtyper = rtyper self.spaceop = spaceop @@ -679,12 +677,11 @@ if type(value) is list: # grunt value = value[:] setattr(result, key, value) - result.forced_opname = self.forced_opname return result def dispatch(self): rtyper = self.rtyper - opname = self.forced_opname or self.spaceop.opname + opname = self.spaceop.opname translate_meth = getattr(rtyper, 'translate_op_' + opname, rtyper.default_translate_operation) return translate_meth(self) @@ -888,7 +885,7 @@ s_value = rtyper.binding(v, default=annmodel.s_None) if not s_value.is_constant(): raise TyperError("non-constant variable of type Void") - if not isinstance(s_value, annmodel.SomePBC): + if not isinstance(s_value, (annmodel.SomePBC, annmodel.SomeNone)): raise TyperError("non-PBC Void argument: %r", (s_value,)) args_s.append(s_value) else: diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -1,84 +1,12 @@ """typesystem.py -- Typesystem-specific operations for RTyper.""" -from rpython.tool.pairtype import extendabletype - from 
rpython.rtyper.lltypesystem import lltype from rpython.rtyper.error import TyperError -class TypeSystem(object): - __metaclass__ = extendabletype - def derefType(self, T): - raise NotImplementedError() - - def deref(self, obj): - """Dereference `obj' to concrete object.""" - raise NotImplementedError() - - def check_null(self, repr, hop): - """Emit operations to check that `hop's argument is not a null object. -""" - raise NotImplementedError() - - def null_callable(self, T): - """null callable object of type T""" - raise NotImplementedError() - - def getcallabletype(self, ARGS, RESTYPE): - cls = self.callable_trait[0] - return cls(ARGS, RESTYPE) - - def getcallable(self, graph, getconcretetype=None): - """Return callable given a Python function.""" - if getconcretetype is None: - getconcretetype = self.getconcretetype - llinputs = [getconcretetype(v) for v in graph.getargs()] - lloutput = getconcretetype(graph.getreturnvar()) - - typ, constr = self.callable_trait - - FT = typ(llinputs, lloutput) - name = graph.name - if hasattr(graph, 'func') and callable(graph.func): - # the Python function object can have _llfnobjattrs_, specifying - # attributes that are forced upon the functionptr(). The idea - # for not passing these extra attributes as arguments to - # getcallable() itself is that multiple calls to getcallable() - # for the same graph should return equal functionptr() objects. 
- if hasattr(graph.func, '_llfnobjattrs_'): - fnobjattrs = graph.func._llfnobjattrs_.copy() - # can specify a '_name', but use graph.name by default - name = fnobjattrs.pop('_name', name) - else: - fnobjattrs = {} - # _callable is normally graph.func, but can be overridden: - # see fakeimpl in extfunc.py - _callable = fnobjattrs.pop('_callable', graph.func) - return constr(FT, name, graph = graph, _callable = _callable, - **fnobjattrs) - else: - return constr(FT, name, graph = graph) - - def getexternalcallable(self, ll_args, ll_result, name, **kwds): - typ, constr = self.callable_trait - - FT = typ(ll_args, ll_result) - return constr(FT, name, **kwds) - - def getconcretetype(self, v): - """Helper called by getcallable() to get the conrete type of a variable -in a graph.""" - raise NotImplementedError() - - -class LowLevelTypeSystem(TypeSystem): +class LowLevelTypeSystem(object): name = "lltypesystem" - callable_trait = (lltype.FuncType, lltype.functionptr) - - def derefType(self, T): - assert isinstance(T, lltype.Ptr) - return T.TO def deref(self, obj): assert isinstance(lltype.typeOf(obj), lltype.Ptr) @@ -89,12 +17,13 @@ vlist = hop.inputargs(repr) return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - def getconcretetype(self, v): - return v.concretetype - def null_callable(self, T): return lltype.nullptr(T.TO) + def getexternalcallable(self, ll_args, ll_result, name, **kwds): + FT = lltype.FuncType(ll_args, ll_result) + return lltype.functionptr(FT, name, **kwds) + def generic_is(self, robj1, robj2, hop): roriginal1 = robj1 roriginal2 = robj2 @@ -113,7 +42,37 @@ v_list = hop.inputargs(robj1, robj2) return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) -# All typesystems are singletons -LowLevelTypeSystem.instance = LowLevelTypeSystem() -getfunctionptr = LowLevelTypeSystem.instance.getcallable +def _getconcretetype(v): + return v.concretetype + + +def getfunctionptr(graph, getconcretetype=None): + """Return callable given a Python function.""" + 
if getconcretetype is None: + getconcretetype = _getconcretetype + llinputs = [getconcretetype(v) for v in graph.getargs()] + lloutput = getconcretetype(graph.getreturnvar()) + + FT = lltype.FuncType(llinputs, lloutput) + name = graph.name + if hasattr(graph, 'func') and callable(graph.func): + # the Python function object can have _llfnobjattrs_, specifying + # attributes that are forced upon the functionptr(). The idea + # for not passing these extra attributes as arguments to + # getcallable() itself is that multiple calls to getcallable() + # for the same graph should return equal functionptr() objects. + if hasattr(graph.func, '_llfnobjattrs_'): + fnobjattrs = graph.func._llfnobjattrs_.copy() + # can specify a '_name', but use graph.name by default + name = fnobjattrs.pop('_name', name) + else: + fnobjattrs = {} + # _callable is normally graph.func, but can be overridden: + # see fakeimpl in extfunc.py + _callable = fnobjattrs.pop('_callable', graph.func) + return lltype.functionptr(FT, name, graph = graph, + _callable = _callable, **fnobjattrs) + else: + return lltype.functionptr(FT, name, graph = graph) + diff --git a/rpython/translator/goal/query.py b/rpython/translator/goal/query.py --- a/rpython/translator/goal/query.py +++ b/rpython/translator/goal/query.py @@ -62,7 +62,7 @@ def ismeth(s_val): if not isinstance(s_val, annmodel.SomePBC): return False - if s_val.isNone(): + if isinstance(s_val, annmodel.SomeNone): return False return s_val.getKind() is MethodDesc bk = translator.annotator.bookkeeper From noreply at buildbot.pypy.org Wed May 28 19:29:14 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 28 May 2014 19:29:14 +0200 (CEST) Subject: [pypy-commit] pypy default: small cleanup Message-ID: <20140528172914.BC2E81D2CAA@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71756:bb8dea0b283d Date: 2014-05-28 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/bb8dea0b283d/ Log: small cleanup diff --git 
a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -2,7 +2,7 @@ Type inference for user-defined classes. """ from rpython.annotator.model import ( - SomePBC, SomeNone, s_ImpossibleValue, unionof, s_None, SomeInteger, + SomePBC, s_ImpossibleValue, unionof, s_None, SomeInteger, SomeTuple, SomeString, AnnotatorError) from rpython.annotator import description @@ -104,8 +104,7 @@ self.bookkeeper.annotator.reflowfromposition(position) # check for method demotion and after-the-fact method additions - if (isinstance(s_newvalue, SomePBC) and - not isinstance(s_newvalue, SomeNone)): + if isinstance(s_newvalue, SomePBC): attr = self.name if s_newvalue.getKind() == description.MethodDesc: # is method diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -155,7 +155,6 @@ # if s_value is SomePBC([MethodDescs...]) # return a PBC representing the underlying functions if (isinstance(s_value, annmodel.SomePBC) and - not isinstance(s_value, annmodel.SomeNone) and s_value.getKind() == description.MethodDesc): s_value = self.classdef.lookup_filter(s_value) funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions] From noreply at buildbot.pypy.org Wed May 28 22:05:34 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:34 +0200 (CEST) Subject: [pypy-commit] pypy default: test_ast.py edited online with Bitbucket Message-ID: <20140528200534.710851D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhu man Branch: Changeset: r71761:4573d0b99f0a Date: 2014-03-20 03:05 +0000 http://bitbucket.org/pypy/pypy/changeset/4573d0b99f0a/ Log: test_ast.py edited online with Bitbucket diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -402,7 +402,8 @@ num_node = ast.Num(n=2,lineno=2) try: num_node2 = 
copy.deepcopy(num_node) - except: raise + except: + raise def test_issue1673_Num_fullinit(self): import ast @@ -410,7 +411,8 @@ num_node = ast.Num(n=2,lineno=2,col_offset=3) try: num_node2 = copy.deepcopy(num_node) - except: raise + except: + raise def test_issue1673_Str(self): import ast @@ -418,4 +420,5 @@ str_node = ast.Str(n=2,lineno=2) try: str_node2 = copy.deepcopy(str_node) - except: raise + except: + raise From noreply at buildbot.pypy.org Wed May 28 22:05:28 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:28 +0200 (CEST) Subject: [pypy-commit] pypy default: add tests to prove issue 1713 and issue 1673 Message-ID: <20140528200528.A30AC1D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71757:5d13e1a985f6 Date: 2014-03-19 22:19 -0400 http://bitbucket.org/pypy/pypy/changeset/5d13e1a985f6/ Log: add tests to prove issue 1713 and issue 1673 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,34 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + def test_dict_astNode(self): + import ast + numMode = ast.Num(n=2,lineno=2,col_offset=3) + dictRes = numMode.__dict__ + assert dictRes['n'] == 2 + assert dictRes['lineno'] == 2 + assert dictRes['col_offset'] == 3 + def test_issue1673_Num_notfullinit(self): + import ast + import copy + numMode = ast.Num(n=2,lineno=2) + try: + numNode2 = copy.deepcopy(numMode) + except: + raise + def test_issue1673_Num_fullinit(self): + import ast + import copy + numMode = ast.Num(n=2,lineno=2,col_offset=3) + try: + numNode2 = copy.deepcopy(numMode) + except: + raise + def test_issue1673_Str(self): + import ast + import copy + strNode = ast.Str(n=2,lineno=2) + try: + strNode2 = copy.deepcopy(strNode) + except: + raise From noreply at buildbot.pypy.org Wed May 28 22:05:35 2014 From: noreply at 
buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:35 +0200 (CEST) Subject: [pypy-commit] pypy default: test_ast.py edited online with Bitbucket Message-ID: <20140528200535.AE4EC1D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhu man Branch: Changeset: r71762:831068f4a34b Date: 2014-03-20 03:17 +0000 http://bitbucket.org/pypy/pypy/changeset/831068f4a34b/ Log: test_ast.py edited online with Bitbucket diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -400,25 +400,17 @@ import ast import copy num_node = ast.Num(n=2,lineno=2) - try: - num_node2 = copy.deepcopy(num_node) - except: - raise - + num_node2 = copy.deepcopy(num_node) + def test_issue1673_Num_fullinit(self): import ast import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) - try: - num_node2 = copy.deepcopy(num_node) - except: - raise + num_node2 = copy.deepcopy(num_node) def test_issue1673_Str(self): import ast import copy str_node = ast.Str(n=2,lineno=2) - try: - str_node2 = copy.deepcopy(str_node) - except: - raise + str_node2 = copy.deepcopy(str_node) + From noreply at buildbot.pypy.org Wed May 28 22:05:30 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:30 +0200 (CEST) Subject: [pypy-commit] pypy default: merge to keep updated Message-ID: <20140528200530.552E21D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71758:6bfc4df6ee95 Date: 2014-03-19 22:24 -0400 http://bitbucket.org/pypy/pypy/changeset/6bfc4df6ee95/ Log: merge to keep updated diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -440,10 +440,11 @@ return name - def getbuiltinmodule(self, name, force_init=False): + def getbuiltinmodule(self, name, force_init=False, reuse=True): w_name = self.wrap(name) w_modules = self.sys.get('modules') if not force_init: + 
assert reuse is True try: return self.getitem(w_modules, w_name) except OperationError, e: @@ -459,10 +460,18 @@ "getbuiltinmodule() called with non-builtin module %s", name) else: - # And initialize it + # Initialize the module from pypy.interpreter.module import Module if isinstance(w_mod, Module): - w_mod.init(self) + if not reuse and w_mod.startup_called: + # Create a copy of the module + w_mod.getdict(self) # unlazy w_initialdict + w_new = self.wrap(Module(self, w_name)) + self.call_method(w_new.getdict(self), 'update', + w_mod.w_initialdict) + w_mod = w_new + else: + w_mod.init(self) # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) diff --git a/pypy/module/cpyext/include/pystate.h b/pypy/module/cpyext/include/pystate.h --- a/pypy/module/cpyext/include/pystate.h +++ b/pypy/module/cpyext/include/pystate.h @@ -21,9 +21,8 @@ #define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \ } -typedef - enum {PyGILState_LOCKED, PyGILState_UNLOCKED} - PyGILState_STATE; +enum {PyGILState_LOCKED, PyGILState_UNLOCKED}; +typedef int PyGILState_STATE; #define PyThreadState_GET() PyThreadState_Get() diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -204,16 +204,14 @@ # Before external call is after running Python rffi.aroundstate.before() -PyGILState_STATE = rffi.COpaquePtr('PyGILState_STATE', - typedef='PyGILState_STATE', - compilation_info=CConfig._compilation_info_) +PyGILState_STATE = rffi.INT @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL) def PyGILState_Ensure(space): if rffi.aroundstate.after: # After external call is before entering Python rffi.aroundstate.after() - return lltype.nullptr(PyGILState_STATE.TO) + return rffi.cast(PyGILState_STATE, 0) @cpython_api([PyGILState_STATE], lltype.Void) def PyGILState_Release(space, state): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ 
b/pypy/module/imp/importing.py @@ -579,7 +579,8 @@ return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,7 +203,6 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 - skip("fix me") import sys, marshal old = marshal.loads @@ -223,7 +222,6 @@ # taken from https://bugs.pypy.org/issue1514, with extra cases # that show a difference with CPython: we can get on CPython # several module objects for the same built-in module :-( - skip("several built-in module objects: not supported by pypy") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -578,7 +578,6 @@ assert hasattr(time, 'clock') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -586,7 +585,6 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): - skip("fix me") import sys, time oldpath = sys.path time.tzset = "" diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -7,7 +7,7 @@ Py_MARSHAL_VERSION = 2 - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dump(space, w_data, w_f, w_version): """Write the 'data' object into the open file 'f'.""" # special case real files for performance 
@@ -24,7 +24,7 @@ finally: writer.finished() - at unwrap_spec(w_version = WrappedDefault(Py_MARSHAL_VERSION)) + at unwrap_spec(w_version=WrappedDefault(Py_MARSHAL_VERSION)) def dumps(space, w_data, w_version): """Return the string that would have been written to a file by dump(data, file).""" @@ -217,6 +217,16 @@ self.space.marshal_w(w_obj, self) def dump_w_obj(self, w_obj): + space = self.space + if space.type(w_obj).is_heaptype(): + try: + buf = space.buffer_w(w_obj) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + self.raise_exc("unmarshallable object") + else: + w_obj = space.newbuffer(buf) try: self.put_w_obj(w_obj) except rstackovf.StackOverflow: diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -11,7 +11,7 @@ def w_marshal_check(self, case): import marshal, StringIO s = marshal.dumps(case) - print repr(s) + print(repr(s)) x = marshal.loads(s) assert x == case and type(x) is type(case) f = StringIO.StringIO() @@ -173,10 +173,13 @@ import marshal types = (float, complex, int, long, tuple, list, dict, set, frozenset) for cls in types: + print(cls) class subtype(cls): pass exc = raises(ValueError, marshal.dumps, subtype) assert str(exc.value) == 'unmarshallable object' + exc = raises(ValueError, marshal.dumps, subtype()) + assert str(exc.value) == 'unmarshallable object' def test_valid_subtypes(self): import marshal diff --git a/pypy/objspace/std/memoryview.py b/pypy/objspace/std/memoryview.py --- a/pypy/objspace/std/memoryview.py +++ b/pypy/objspace/std/memoryview.py @@ -176,6 +176,14 @@ self.buf = buf def buffer_w(self, space): + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). 
For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ return self.buf @staticmethod @@ -229,17 +237,6 @@ buf = buffer.SubBuffer(buf, start, size) return W_MemoryView(buf) - def descr_buffer(self, space): - """ - Note that memoryview() is very inconsistent in CPython: it does not - support the buffer interface but does support the new buffer - interface: as a result, it is possible to pass memoryview to - e.g. socket.send() but not to file.write(). For simplicity and - consistency, in PyPy memoryview DOES support buffer(), which means - that it is accepted in more places than CPython. - """ - return space.wrap(self.buf) - def descr_tobytes(self, space): return space.wrap(self.as_str()) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -14,6 +14,11 @@ includes = ['stdio.h', 'sys/types.h'] if os.name == "posix": includes += ['unistd.h'] + ftruncate = 'ftruncate' + fileno = 'fileno' +else: + ftruncate = '_chsize' + fileno = '_fileno' eci = ExternalCompilationInfo(includes=includes) def llexternal(*args, **kwargs): @@ -41,10 +46,10 @@ c_fseek = llexternal('fseek', [lltype.Ptr(FILE), rffi.LONG, rffi.INT], rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) -c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) +c_fileno = llexternal(fileno, [lltype.Ptr(FILE)], rffi.INT) c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) -c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) From noreply at buildbot.pypy.org Wed May 28 22:05:36 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:36 +0200 (CEST) Subject: 
[pypy-commit] pypy default: revised the assertions Message-ID: <20140528200536.ECDE21D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71763:e497508e8f06 Date: 2014-03-19 23:35 -0400 http://bitbucket.org/pypy/pypy/changeset/e497508e8f06/ Log: revised the assertions diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -390,27 +390,32 @@ def test_dict_astNode(self): import ast - num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ - assert dict_res['n'] == 2 - assert dict_res['lineno'] == 2 - assert dict_res['col_offset'] == 3 - + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + def test_issue1673_Num_notfullinit(self): import ast import copy num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 num_node2 = copy.deepcopy(num_node) - + def test_issue1673_Num_fullinit(self): import ast import copy num_node = ast.Num(n=2,lineno=2,col_offset=3) num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset def test_issue1673_Str(self): import ast import copy str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node2.n == 2 str_node2 = copy.deepcopy(str_node) From noreply at buildbot.pypy.org Wed May 28 22:05:31 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:31 +0200 (CEST) Subject: [pypy-commit] pypy default: test_ast.py edited online with Bitbucket Message-ID: <20140528200531.B27E81D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhu man Branch: Changeset: r71759:36209ce51e77 Date: 2014-03-20 02:49 +0000 http://bitbucket.org/pypy/pypy/changeset/36209ce51e77/ Log: test_ast.py edited online with Bitbucket diff --git a/pypy/module/_ast/test/test_ast.py 
b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,34 +387,38 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + def test_dict_astNode(self): import ast - numMode = ast.Num(n=2,lineno=2,col_offset=3) - dictRes = numMode.__dict__ - assert dictRes['n'] == 2 - assert dictRes['lineno'] == 2 - assert dictRes['col_offset'] == 3 + num_node = ast.Num(n=2,lineno=2,col_offset=3) + dict_res = num_node.__dict__ + assert dict_res['n'] == 2 + assert dict_res['lineno'] == 2 + assert dict_res['col_offset'] == 3 + def test_issue1673_Num_notfullinit(self): import ast import copy - numMode = ast.Num(n=2,lineno=2) + num_node = ast.Num(n=2,lineno=2) try: - numNode2 = copy.deepcopy(numMode) + num_node2 = copy.deepcopy(num_node) except: raise + def test_issue1673_Num_fullinit(self): import ast import copy - numMode = ast.Num(n=2,lineno=2,col_offset=3) + num_node = ast.Num(n=2,lineno=2,col_offset=3) try: - numNode2 = copy.deepcopy(numMode) + num_node2 = copy.deepcopy(num_node) except: raise + def test_issue1673_Str(self): import ast import copy - strNode = ast.Str(n=2,lineno=2) + str_node = ast.Str(n=2,lineno=2) try: - strNode2 = copy.deepcopy(strNode) + str_node2 = copy.deepcopy(str_node) except: raise From noreply at buildbot.pypy.org Wed May 28 22:05:38 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:38 +0200 (CEST) Subject: [pypy-commit] pypy default: test_ast.py edited online with Bitbucket Message-ID: <20140528200538.509961D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhu man Branch: Changeset: r71764:b3f9ad26cedc Date: 2014-03-20 03:40 +0000 http://bitbucket.org/pypy/pypy/changeset/b3f9ad26cedc/ Log: test_ast.py edited online with Bitbucket diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ 
-416,6 +416,6 @@ import copy str_node = ast.Str(n=2,lineno=2) assert str_node.n == 2 - assert str_node2.n == 2 + assert str_node.lineno == 2 str_node2 = copy.deepcopy(str_node) From noreply at buildbot.pypy.org Wed May 28 22:05:33 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:33 +0200 (CEST) Subject: [pypy-commit] pypy default: test_ast.py edited online with Bitbucket Message-ID: <20140528200533.163B21D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhu man Branch: Changeset: r71760:095bedb3c7ee Date: 2014-03-20 02:56 +0000 http://bitbucket.org/pypy/pypy/changeset/095bedb3c7ee/ Log: test_ast.py edited online with Bitbucket diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -402,8 +402,7 @@ num_node = ast.Num(n=2,lineno=2) try: num_node2 = copy.deepcopy(num_node) - except: - raise + except: raise def test_issue1673_Num_fullinit(self): import ast @@ -411,8 +410,7 @@ num_node = ast.Num(n=2,lineno=2,col_offset=3) try: num_node2 = copy.deepcopy(num_node) - except: - raise + except: raise def test_issue1673_Str(self): import ast @@ -420,5 +418,4 @@ str_node = ast.Str(n=2,lineno=2) try: str_node2 = copy.deepcopy(str_node) - except: - raise + except: raise From noreply at buildbot.pypy.org Wed May 28 22:05:39 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:39 +0200 (CEST) Subject: [pypy-commit] pypy default: solved issue 1713 and issue 1673 Message-ID: <20140528200539.C9A641D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71765:c1a865707d94 Date: 2014-03-30 03:30 -0400 http://bitbucket.org/pypy/pypy/changeset/c1a865707d94/ Log: solved issue 1713 and issue 1673 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -49,13 +49,19 @@ w_type = space.type(self) 
w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) @@ -2913,7 +2919,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 1 def Expression_del_body(space, w_self): @@ -3011,7 +3017,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def stmt_del_lineno(space, w_self): @@ -3038,7 +3044,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def stmt_del_col_offset(space, w_self): @@ -3074,7 +3080,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def FunctionDef_del_name(space, w_self): @@ -3101,7 +3107,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'args') + w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state |= 8 def FunctionDef_del_args(space, w_self): @@ -3201,7 +3207,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 def ClassDef_del_name(space, w_self): @@ -3326,7 +3332,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Return_del_value(space, w_self): @@ -3448,7 +3454,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 8 def Assign_del_value(space, w_self): @@ -3503,7 +3509,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state |= 4 def AugAssign_del_target(space, w_self): @@ -3561,7 +3567,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 16 def AugAssign_del_value(space, w_self): @@ -3616,7 +3622,7 @@ w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'dest') + w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state |= 4 def Print_del_dest(space, w_self): @@ -3665,7 +3671,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'nl') + w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 def Print_del_nl(space, w_self): @@ -3721,7 +3727,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) 
w_self.initialization_state |= 4 def For_del_target(space, w_self): @@ -3750,7 +3756,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'iter') + w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state |= 8 def For_del_iter(space, w_self): @@ -3852,7 +3858,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def While_del_test(space, w_self): @@ -3953,7 +3959,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def If_del_test(space, w_self): @@ -4054,7 +4060,7 @@ w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'context_expr') + w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state |= 4 def With_del_context_expr(space, w_self): @@ -4083,7 +4089,7 @@ w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'optional_vars') + w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state |= 8 def With_del_optional_vars(space, w_self): @@ -4161,7 +4167,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'type') + w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state |= 4 def Raise_del_type(space, w_self): @@ -4190,7 +4196,7 @@ w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'inst') + w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state |= 8 def Raise_del_inst(space, w_self): @@ 
-4219,7 +4225,7 @@ w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'tback') + w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state |= 16 def Raise_del_tback(space, w_self): @@ -4440,7 +4446,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def Assert_del_test(space, w_self): @@ -4469,7 +4475,7 @@ w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'msg') + w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state |= 8 def Assert_del_msg(space, w_self): @@ -4571,7 +4577,7 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'module') + w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 def ImportFrom_del_module(space, w_self): @@ -4620,7 +4626,7 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'level') + w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 def ImportFrom_del_level(space, w_self): @@ -4676,7 +4682,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 4 def Exec_del_body(space, w_self): @@ -4705,7 +4711,7 @@ w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'globals') + w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state |= 8 def Exec_del_globals(space, w_self): @@ -4734,7 +4740,7 @@ w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state &= 
~16 return - w_self.deldictvalue(space, 'locals') + w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state |= 16 def Exec_del_locals(space, w_self): @@ -4836,7 +4842,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Expr_del_value(space, w_self): @@ -4938,7 +4944,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def expr_del_lineno(space, w_self): @@ -4965,7 +4971,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def expr_del_col_offset(space, w_self): @@ -5080,7 +5086,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'left') + w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state |= 4 def BinOp_del_left(space, w_self): @@ -5138,7 +5144,7 @@ w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'right') + w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state |= 16 def BinOp_del_right(space, w_self): @@ -5222,7 +5228,7 @@ w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'operand') + w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state |= 8 def UnaryOp_del_operand(space, w_self): @@ -5274,7 +5280,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'args') + w_self.setdictvalue(space, 
'args', w_new_value) w_self.initialization_state |= 4 def Lambda_del_args(space, w_self): @@ -5303,7 +5309,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 8 def Lambda_del_body(space, w_self): @@ -5357,7 +5363,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'test') + w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state |= 4 def IfExp_del_test(space, w_self): @@ -5386,7 +5392,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'body') + w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state |= 8 def IfExp_del_body(space, w_self): @@ -5415,7 +5421,7 @@ w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state &= ~16 return - w_self.deldictvalue(space, 'orelse') + w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state |= 16 def IfExp_del_orelse(space, w_self): @@ -5588,7 +5594,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def ListComp_del_elt(space, w_self): @@ -5665,7 +5671,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def SetComp_del_elt(space, w_self): @@ -5742,7 +5748,7 @@ w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'key') + w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state |= 4 def DictComp_del_key(space, w_self): @@ -5771,7 +5777,7 @@ 
w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 8 def DictComp_del_value(space, w_self): @@ -5849,7 +5855,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'elt') + w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state |= 4 def GeneratorExp_del_elt(space, w_self): @@ -5926,7 +5932,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Yield_del_value(space, w_self): @@ -5979,7 +5985,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'left') + w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state |= 4 def Compare_del_left(space, w_self): @@ -6080,7 +6086,7 @@ w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'func') + w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state |= 4 def Call_del_func(space, w_self): @@ -6153,7 +6159,7 @@ w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state &= ~32 return - w_self.deldictvalue(space, 'starargs') + w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state |= 32 def Call_del_starargs(space, w_self): @@ -6182,7 +6188,7 @@ w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state &= ~64 return - w_self.deldictvalue(space, 'kwargs') + w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state |= 64 def Call_del_kwargs(space, w_self): @@ -6241,7 +6247,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Repr_del_value(space, w_self): @@ -6292,7 +6298,7 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'n') + w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 def Num_del_n(space, w_self): @@ -6343,7 +6349,7 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 's') + w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 def Str_del_s(space, w_self): @@ -6396,7 +6402,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Attribute_del_value(space, w_self): @@ -6423,7 +6429,7 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'attr') + w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 def Attribute_del_attr(space, w_self): @@ -6507,7 +6513,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Subscript_del_value(space, w_self): @@ -6536,7 +6542,7 @@ w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'slice') + w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state |= 8 def Subscript_del_slice(space, w_self): @@ -6618,7 +6624,7 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'id') + w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 def Name_del_id(space, 
w_self): @@ -6853,7 +6859,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 def Const_del_value(space, w_self): @@ -6979,7 +6985,7 @@ w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lower') + w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state |= 1 def Slice_del_lower(space, w_self): @@ -7008,7 +7014,7 @@ w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'upper') + w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state |= 2 def Slice_del_upper(space, w_self): @@ -7037,7 +7043,7 @@ w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'step') + w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state |= 4 def Slice_del_step(space, w_self): @@ -7139,7 +7145,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 1 def Index_del_value(space, w_self): @@ -7416,7 +7422,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'target') + w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state |= 1 def comprehension_del_target(space, w_self): @@ -7445,7 +7451,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'iter') + w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state |= 2 def comprehension_del_iter(space, w_self): @@ -7521,7 +7527,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) 
w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'lineno') + w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 def excepthandler_del_lineno(space, w_self): @@ -7548,7 +7554,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'col_offset') + w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 def excepthandler_del_col_offset(space, w_self): @@ -7586,7 +7592,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'type') + w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state |= 4 def ExceptHandler_del_type(space, w_self): @@ -7615,7 +7621,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7716,7 +7722,7 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'vararg') + w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 def arguments_del_vararg(space, w_self): @@ -7746,7 +7752,7 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return - w_self.deldictvalue(space, 'kwarg') + w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 def arguments_del_kwarg(space, w_self): @@ -7824,7 +7830,7 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'arg') + w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 def keyword_del_arg(space, w_self): @@ -7853,7 +7859,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~2 return - 
w_self.deldictvalue(space, 'value') + w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 2 def keyword_del_value(space, w_self): @@ -7905,7 +7911,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return - w_self.deldictvalue(space, 'name') + w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 1 def alias_del_name(space, w_self): @@ -7935,7 +7941,7 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return - w_self.deldictvalue(space, 'asname') + w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 def alias_del_asname(space, w_self): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -480,7 +480,9 @@ self.emit("w_self.setdictvalue(space, '%s', w_new_value)" % (field.name,), 1) else: - self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) + #self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) + self.emit("w_self.setdictvalue(space, '%s', w_new_value)" + % (field.name,), 1) self.emit("w_self.initialization_state |= %s" % (flag,), 1) self.emit("") @@ -596,13 +598,19 @@ w_type = space.type(self) w_fields = w_type.getdictvalue(space, "_fields") for w_name in space.fixedview(w_fields): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): - space.setitem(w_dict, w_name, + try: + space.setitem(w_dict, w_name, space.getattr(self, w_name)) + except: + pass return space.newtuple([space.type(self), space.newtuple([]), w_dict]) From noreply at buildbot.pypy.org Wed May 28 22:05:41 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:41 +0200 
(CEST) Subject: [pypy-commit] pypy default: dd tests for issue 1713 and issue 1673 Message-ID: <20140528200541.0BAD31D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71766:4318681c3a7e Date: 2014-03-30 03:31 -0400 http://bitbucket.org/pypy/pypy/changeset/4318681c3a7e/ Log: dd tests for issue 1713 and issue 1673 diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -387,3 +387,40 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") + + def test_dict_astNode(self): + import ast + num_node = ast.Num(n=2, lineno=2, col_offset=3) + dict_res = num_node.__dict__ + + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Num_notfullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2) + assert num_node.n == 2 + assert num_node.lineno == 2 + num_node2 = copy.deepcopy(num_node) + + def test_issue1673_Num_fullinit(self): + import ast + import copy + num_node = ast.Num(n=2,lineno=2,col_offset=3) + num_node2 = copy.deepcopy(num_node) + assert num_node.n == num_node2.n + assert num_node.lineno == num_node2.lineno + assert num_node.col_offset == num_node2.col_offset + dict_res = num_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + + def test_issue1673_Str(self): + import ast + import copy + str_node = ast.Str(n=2,lineno=2) + assert str_node.n == 2 + assert str_node.lineno == 2 + str_node2 = copy.deepcopy(str_node) + dict_res = str_node2.__dict__ + assert dict_res == {'n':2, 'lineno':2} + \ No newline at end of file From noreply at buildbot.pypy.org Wed May 28 22:05:42 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:42 +0200 (CEST) Subject: [pypy-commit] pypy default: add the set the save_original_object flag to be true when field type is in asdl.builtin_types Message-ID: 
<20140528200542.481441D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71767:4b4b167bb969 Date: 2014-03-31 22:01 -0400 http://bitbucket.org/pypy/pypy/changeset/4b4b167bb969/ Log: add the set the save_original_object flag to be true when field type is in asdl.builtin_types diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -52,7 +52,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -60,7 +60,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass return space.newtuple([space.type(self), space.newtuple([]), @@ -2919,7 +2919,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 1 def Expression_del_body(space, w_self): @@ -3017,6 +3017,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 @@ -3044,6 +3045,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -3080,6 +3082,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 @@ -3107,7 +3110,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'args', w_new_value) + 
w_self.deldictvalue(space, 'args') w_self.initialization_state |= 8 def FunctionDef_del_args(space, w_self): @@ -3207,6 +3210,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 4 @@ -3332,7 +3336,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Return_del_value(space, w_self): @@ -3454,7 +3458,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 def Assign_del_value(space, w_self): @@ -3509,7 +3513,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 def AugAssign_del_target(space, w_self): @@ -3567,7 +3571,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 16 def AugAssign_del_value(space, w_self): @@ -3622,7 +3626,7 @@ w_self.setdictvalue(space, 'dest', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'dest', w_new_value) + w_self.deldictvalue(space, 'dest') w_self.initialization_state |= 4 def Print_del_dest(space, w_self): @@ -3671,6 +3675,7 @@ w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state &= ~16 return + # need to save the original object too w_self.setdictvalue(space, 'nl', w_new_value) w_self.initialization_state |= 16 @@ -3727,7 +3732,7 @@ w_self.setdictvalue(space, 'target', 
w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 4 def For_del_target(space, w_self): @@ -3756,7 +3761,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'iter', w_new_value) + w_self.deldictvalue(space, 'iter') w_self.initialization_state |= 8 def For_del_iter(space, w_self): @@ -3858,7 +3863,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def While_del_test(space, w_self): @@ -3959,7 +3964,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def If_del_test(space, w_self): @@ -4060,7 +4065,7 @@ w_self.setdictvalue(space, 'context_expr', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'context_expr', w_new_value) + w_self.deldictvalue(space, 'context_expr') w_self.initialization_state |= 4 def With_del_context_expr(space, w_self): @@ -4089,7 +4094,7 @@ w_self.setdictvalue(space, 'optional_vars', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'optional_vars', w_new_value) + w_self.deldictvalue(space, 'optional_vars') w_self.initialization_state |= 8 def With_del_optional_vars(space, w_self): @@ -4167,7 +4172,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'type', w_new_value) + w_self.deldictvalue(space, 'type') w_self.initialization_state |= 4 def Raise_del_type(space, w_self): @@ -4196,7 +4201,7 @@ w_self.setdictvalue(space, 'inst', w_new_value) w_self.initialization_state &= ~8 return - 
w_self.setdictvalue(space, 'inst', w_new_value) + w_self.deldictvalue(space, 'inst') w_self.initialization_state |= 8 def Raise_del_inst(space, w_self): @@ -4225,7 +4230,7 @@ w_self.setdictvalue(space, 'tback', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'tback', w_new_value) + w_self.deldictvalue(space, 'tback') w_self.initialization_state |= 16 def Raise_del_tback(space, w_self): @@ -4446,7 +4451,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def Assert_del_test(space, w_self): @@ -4475,7 +4480,7 @@ w_self.setdictvalue(space, 'msg', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'msg', w_new_value) + w_self.deldictvalue(space, 'msg') w_self.initialization_state |= 8 def Assert_del_msg(space, w_self): @@ -4577,6 +4582,7 @@ w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'module', w_new_value) w_self.initialization_state |= 4 @@ -4626,6 +4632,7 @@ w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state &= ~16 return + # need to save the original object too w_self.setdictvalue(space, 'level', w_new_value) w_self.initialization_state |= 16 @@ -4682,7 +4689,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 4 def Exec_del_body(space, w_self): @@ -4711,7 +4718,7 @@ w_self.setdictvalue(space, 'globals', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'globals', w_new_value) + w_self.deldictvalue(space, 'globals') w_self.initialization_state |= 8 def Exec_del_globals(space, w_self): @@ -4740,7 +4747,7 @@ 
w_self.setdictvalue(space, 'locals', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'locals', w_new_value) + w_self.deldictvalue(space, 'locals') w_self.initialization_state |= 16 def Exec_del_locals(space, w_self): @@ -4842,7 +4849,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Expr_del_value(space, w_self): @@ -4944,6 +4951,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 @@ -4971,6 +4979,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -5086,7 +5095,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'left', w_new_value) + w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 def BinOp_del_left(space, w_self): @@ -5144,7 +5153,7 @@ w_self.setdictvalue(space, 'right', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'right', w_new_value) + w_self.deldictvalue(space, 'right') w_self.initialization_state |= 16 def BinOp_del_right(space, w_self): @@ -5228,7 +5237,7 @@ w_self.setdictvalue(space, 'operand', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'operand', w_new_value) + w_self.deldictvalue(space, 'operand') w_self.initialization_state |= 8 def UnaryOp_del_operand(space, w_self): @@ -5280,7 +5289,7 @@ w_self.setdictvalue(space, 'args', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'args', w_new_value) + 
w_self.deldictvalue(space, 'args') w_self.initialization_state |= 4 def Lambda_del_args(space, w_self): @@ -5309,7 +5318,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 def Lambda_del_body(space, w_self): @@ -5363,7 +5372,7 @@ w_self.setdictvalue(space, 'test', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'test', w_new_value) + w_self.deldictvalue(space, 'test') w_self.initialization_state |= 4 def IfExp_del_test(space, w_self): @@ -5392,7 +5401,7 @@ w_self.setdictvalue(space, 'body', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'body', w_new_value) + w_self.deldictvalue(space, 'body') w_self.initialization_state |= 8 def IfExp_del_body(space, w_self): @@ -5421,7 +5430,7 @@ w_self.setdictvalue(space, 'orelse', w_new_value) w_self.initialization_state &= ~16 return - w_self.setdictvalue(space, 'orelse', w_new_value) + w_self.deldictvalue(space, 'orelse') w_self.initialization_state |= 16 def IfExp_del_orelse(space, w_self): @@ -5594,7 +5603,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def ListComp_del_elt(space, w_self): @@ -5671,7 +5680,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def SetComp_del_elt(space, w_self): @@ -5748,7 +5757,7 @@ w_self.setdictvalue(space, 'key', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'key', w_new_value) + w_self.deldictvalue(space, 'key') w_self.initialization_state |= 4 def DictComp_del_key(space, w_self): @@ -5777,7 +5786,7 @@ 
w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 8 def DictComp_del_value(space, w_self): @@ -5855,7 +5864,7 @@ w_self.setdictvalue(space, 'elt', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'elt', w_new_value) + w_self.deldictvalue(space, 'elt') w_self.initialization_state |= 4 def GeneratorExp_del_elt(space, w_self): @@ -5932,7 +5941,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Yield_del_value(space, w_self): @@ -5985,7 +5994,7 @@ w_self.setdictvalue(space, 'left', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'left', w_new_value) + w_self.deldictvalue(space, 'left') w_self.initialization_state |= 4 def Compare_del_left(space, w_self): @@ -6086,7 +6095,7 @@ w_self.setdictvalue(space, 'func', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'func', w_new_value) + w_self.deldictvalue(space, 'func') w_self.initialization_state |= 4 def Call_del_func(space, w_self): @@ -6159,7 +6168,7 @@ w_self.setdictvalue(space, 'starargs', w_new_value) w_self.initialization_state &= ~32 return - w_self.setdictvalue(space, 'starargs', w_new_value) + w_self.deldictvalue(space, 'starargs') w_self.initialization_state |= 32 def Call_del_starargs(space, w_self): @@ -6188,7 +6197,7 @@ w_self.setdictvalue(space, 'kwargs', w_new_value) w_self.initialization_state &= ~64 return - w_self.setdictvalue(space, 'kwargs', w_new_value) + w_self.deldictvalue(space, 'kwargs') w_self.initialization_state |= 64 def Call_del_kwargs(space, w_self): @@ -6247,7 +6256,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - 
w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Repr_del_value(space, w_self): @@ -6298,6 +6307,7 @@ w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'n', w_new_value) w_self.initialization_state |= 4 @@ -6349,6 +6359,7 @@ w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 's', w_new_value) w_self.initialization_state |= 4 @@ -6402,7 +6413,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Attribute_del_value(space, w_self): @@ -6429,6 +6440,7 @@ w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state &= ~8 return + # need to save the original object too w_self.setdictvalue(space, 'attr', w_new_value) w_self.initialization_state |= 8 @@ -6513,7 +6525,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 4 def Subscript_del_value(space, w_self): @@ -6542,7 +6554,7 @@ w_self.setdictvalue(space, 'slice', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'slice', w_new_value) + w_self.deldictvalue(space, 'slice') w_self.initialization_state |= 8 def Subscript_del_slice(space, w_self): @@ -6624,6 +6636,7 @@ w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'id', w_new_value) w_self.initialization_state |= 4 @@ -6859,6 +6872,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~4 
return + # need to save the original object too w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state |= 4 @@ -6985,7 +6999,7 @@ w_self.setdictvalue(space, 'lower', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'lower', w_new_value) + w_self.deldictvalue(space, 'lower') w_self.initialization_state |= 1 def Slice_del_lower(space, w_self): @@ -7014,7 +7028,7 @@ w_self.setdictvalue(space, 'upper', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'upper', w_new_value) + w_self.deldictvalue(space, 'upper') w_self.initialization_state |= 2 def Slice_del_upper(space, w_self): @@ -7043,7 +7057,7 @@ w_self.setdictvalue(space, 'step', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'step', w_new_value) + w_self.deldictvalue(space, 'step') w_self.initialization_state |= 4 def Slice_del_step(space, w_self): @@ -7145,7 +7159,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 1 def Index_del_value(space, w_self): @@ -7422,7 +7436,7 @@ w_self.setdictvalue(space, 'target', w_new_value) w_self.initialization_state &= ~1 return - w_self.setdictvalue(space, 'target', w_new_value) + w_self.deldictvalue(space, 'target') w_self.initialization_state |= 1 def comprehension_del_target(space, w_self): @@ -7451,7 +7465,7 @@ w_self.setdictvalue(space, 'iter', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'iter', w_new_value) + w_self.deldictvalue(space, 'iter') w_self.initialization_state |= 2 def comprehension_del_iter(space, w_self): @@ -7527,6 +7541,7 @@ w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'lineno', w_new_value) w_self.initialization_state |= 1 
@@ -7554,6 +7569,7 @@ w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'col_offset', w_new_value) w_self.initialization_state |= 2 @@ -7592,7 +7608,7 @@ w_self.setdictvalue(space, 'type', w_new_value) w_self.initialization_state &= ~4 return - w_self.setdictvalue(space, 'type', w_new_value) + w_self.deldictvalue(space, 'type') w_self.initialization_state |= 4 def ExceptHandler_del_type(space, w_self): @@ -7621,7 +7637,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~8 return - w_self.setdictvalue(space, 'name', w_new_value) + w_self.deldictvalue(space, 'name') w_self.initialization_state |= 8 def ExceptHandler_del_name(space, w_self): @@ -7722,6 +7738,7 @@ w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'vararg', w_new_value) w_self.initialization_state |= 2 @@ -7752,6 +7769,7 @@ w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state &= ~4 return + # need to save the original object too w_self.setdictvalue(space, 'kwarg', w_new_value) w_self.initialization_state |= 4 @@ -7830,6 +7848,7 @@ w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'arg', w_new_value) w_self.initialization_state |= 1 @@ -7859,7 +7878,7 @@ w_self.setdictvalue(space, 'value', w_new_value) w_self.initialization_state &= ~2 return - w_self.setdictvalue(space, 'value', w_new_value) + w_self.deldictvalue(space, 'value') w_self.initialization_state |= 2 def keyword_del_value(space, w_self): @@ -7911,6 +7930,7 @@ w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state &= ~1 return + # need to save the original object too w_self.setdictvalue(space, 'name', w_new_value) w_self.initialization_state |= 
1 @@ -7941,6 +7961,7 @@ w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state &= ~2 return + # need to save the original object too w_self.setdictvalue(space, 'asname', w_new_value) w_self.initialization_state |= 2 diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -459,6 +459,7 @@ self.emit("raise OperationError(space.w_TypeError, " "space.w_None)", 3) else: + save_original_object = True level = 2 if field.opt and field.type.value != "int": self.emit("if space.is_w(w_new_value, space.w_None):", 2) @@ -480,9 +481,7 @@ self.emit("w_self.setdictvalue(space, '%s', w_new_value)" % (field.name,), 1) else: - #self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) - self.emit("w_self.setdictvalue(space, '%s', w_new_value)" - % (field.name,), 1) + self.emit("w_self.deldictvalue(space, '%s')" %(field.name,), 1) self.emit("w_self.initialization_state |= %s" % (flag,), 1) self.emit("") @@ -601,7 +600,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -609,7 +608,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except: + except Exception: pass return space.newtuple([space.type(self), space.newtuple([]), From noreply at buildbot.pypy.org Wed May 28 22:05:43 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:43 +0200 (CEST) Subject: [pypy-commit] pypy default: modify exception to OperationError Message-ID: <20140528200543.7C9C91D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71768:49cf2e600e22 Date: 2014-03-31 22:55 -0400 http://bitbucket.org/pypy/pypy/changeset/49cf2e600e22/ Log: modify exception to OperationError diff --git a/pypy/interpreter/astcompiler/ast.py 
b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -52,7 +52,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -60,7 +60,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass return space.newtuple([space.type(self), space.newtuple([]), diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -600,7 +600,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass w_attrs = space.findattr(w_type, space.wrap("_attributes")) if w_attrs: @@ -608,7 +608,7 @@ try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) - except Exception: + except OperationError: pass return space.newtuple([space.type(self), space.newtuple([]), From noreply at buildbot.pypy.org Wed May 28 22:05:44 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 22:05:44 +0200 (CEST) Subject: [pypy-commit] pypy gc-pinning: add pinned gc flag Message-ID: <20140528200544.C02E61D2CB8@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc-pinning Changeset: r71769:5530f6e9f4ca Date: 2014-05-28 15:41 -0400 http://bitbucket.org/pypy/pypy/changeset/5530f6e9f4ca/ Log: add pinned gc flag diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -132,8 +132,8 @@ # The following flag is set on surviving raw-malloced young objects during # a minor collection. 
GCFLAG_VISITED_RMY = first_gcflag << 8 - -_GCFLAG_FIRST_UNUSED = first_gcflag << 9 # the first unused bit +GCFLAG_PINNED = first_gcflag << 9 +_GCFLAG_FIRST_UNUSED = first_gcflag << 10 # the first unused bit # States for the incremental GC From noreply at buildbot.pypy.org Wed May 28 23:05:09 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 23:05:09 +0200 (CEST) Subject: [pypy-commit] pypy default: merge three heads Message-ID: <20140528210509.482A61D2808@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71770:10608abfd026 Date: 2014-05-28 16:56 -0400 http://bitbucket.org/pypy/pypy/changeset/10608abfd026/ Log: merge three heads diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -397,13 +397,14 @@ dict_res = num_node.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + def test_issue1673_Num_notfullinit(self): import ast import copy num_node = ast.Num(n=2,lineno=2) assert num_node.n == 2 assert num_node.lineno == 2 - num_node2 = copy.deepcopy(num_node) + num_node = copy.deepcopy(num_node) def test_issue1673_Num_fullinit(self): import ast @@ -416,6 +417,7 @@ dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + def test_issue1673_Str(self): import ast import copy From noreply at buildbot.pypy.org Wed May 28 23:05:10 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 28 May 2014 23:05:10 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140528210510.A84841D2808@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: Changeset: r71771:56d81b413df8 Date: 2014-05-28 17:04 -0400 http://bitbucket.org/pypy/pypy/changeset/56d81b413df8/ Log: merge heads diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -390,7 +390,7 @@ 
mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") - + def test_dict_astNode(self): import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) @@ -404,7 +404,7 @@ num_node = ast.Num(n=2,lineno=2) assert num_node.n == 2 assert num_node.lineno == 2 - num_node = copy.deepcopy(num_node) + num_node2 = copy.deepcopy(num_node) def test_issue1673_Num_fullinit(self): import ast @@ -418,6 +418,7 @@ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} + def test_issue1673_Str(self): import ast import copy @@ -427,3 +428,4 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} + From noreply at buildbot.pypy.org Wed May 28 23:07:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 28 May 2014 23:07:51 +0200 (CEST) Subject: [pypy-commit] pypy default: remove whitespace changes Message-ID: <20140528210751.2B8621D2808@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r71772:ca09a7211645 Date: 2014-05-28 17:07 -0400 http://bitbucket.org/pypy/pypy/changeset/ca09a7211645/ Log: remove whitespace changes diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -390,14 +390,13 @@ mod.body[0].body[0].handlers[0].lineno = 7 mod.body[0].body[0].handlers[1].lineno = 6 code = compile(mod, "", "exec") - + def test_dict_astNode(self): import ast num_node = ast.Num(n=2, lineno=2, col_offset=3) dict_res = num_node.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - def test_issue1673_Num_notfullinit(self): import ast import copy @@ -417,8 +416,6 @@ dict_res = num_node2.__dict__ assert dict_res == {'n':2, 'lineno':2, 'col_offset':3} - - def test_issue1673_Str(self): import ast import copy @@ -428,4 +425,3 @@ str_node2 = copy.deepcopy(str_node) dict_res = str_node2.__dict__ assert dict_res == {'n':2, 'lineno':2} - From 
noreply at buildbot.pypy.org Thu May 29 04:12:07 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 29 May 2014 04:12:07 +0200 (CEST) Subject: [pypy-commit] pypy default: rename NoneFrozenPBCRepr to NoneRepr and move it to rpython.rtyper.rnone Message-ID: <20140529021207.7CA041D2837@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71773:8faa5a81be74 Date: 2014-05-29 03:10 +0100 http://bitbucket.org/pypy/pypy/changeset/8faa5a81be74/ Log: rename NoneFrozenPBCRepr to NoneRepr and move it to rpython.rtyper.rnone diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -160,8 +160,6 @@ def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) - if not descs: - return family = descs[0].getcallfamily() s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -486,7 +486,7 @@ assert False def specialize_call(self, hop): - from rpython.rtyper import rpbc + from rpython.rtyper.rnone import NoneRepr PTR = hop.r_result.lowleveltype if isinstance(PTR, lltype.Ptr): T = lltype.Ptr @@ -496,7 +496,7 @@ assert False hop.exception_cannot_occur() - if isinstance(hop.args_r[1], rpbc.NoneFrozenPBCRepr): + if isinstance(hop.args_r[1], NoneRepr): return hop.inputconst(PTR, null) v_arg = hop.inputarg(hop.args_r[1], arg=1) assert isinstance(v_arg.concretetype, T) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -7,39 +7,15 @@ from rpython.rtyper.lltypesystem import rclass, llmemory from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, 
Signed) -from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, - SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, - get_concrete_calltable) + SingleFrozenPBCRepr, get_concrete_calltable) from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pairtype -def rtype_is_None(robj1, rnone2, hop, pos=0): - if isinstance(robj1.lowleveltype, Ptr): - v1 = hop.inputarg(robj1, pos) - return hop.genop('ptr_iszero', [v1], resulttype=Bool) - elif robj1.lowleveltype == llmemory.Address: - v1 = hop.inputarg(robj1, pos) - cnull = hop.inputconst(llmemory.Address, robj1.null_instance()) - return hop.genop('adr_eq', [v1, cnull], resulttype=Bool) - elif robj1 == none_frozen_pbc_repr: - return hop.inputconst(Bool, True) - elif isinstance(robj1, SmallFunctionSetPBCRepr): - if robj1.s_pbc.can_be_None: - v1 = hop.inputarg(robj1, pos) - return hop.genop('char_eq', [v1, inputconst(Char, '\000')], - resulttype=Bool) - else: - return inputconst(Bool, False) - else: - raise TyperError('rtype_is_None of %r' % (robj1)) - - -# ____________________________________________________________ - class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): """Representation selected for multiple non-callable pre-built constants.""" def __init__(self, rtyper, access_set): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -4,9 +4,8 @@ from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse -from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import typeOf, Void, Bool 
+from rpython.rtyper.lltypesystem.lltype import typeOf, Void from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, inputdesc, warning, impossible_repr) from rpython.tool.pairtype import pair, pairtype @@ -23,8 +22,7 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, - SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, - MethodOfFrozenPBCRepr) + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr) kind = self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -61,13 +59,6 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t -class __extend__(annmodel.SomeNone): - def rtyper_makerepr(self, rtyper): - return none_frozen_pbc_repr - - def rtyper_makekey(self): - return self.__class__, - # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -589,56 +580,6 @@ def convert_from_to((r_from, r_to), v, llops): return pair(r_from.r_im_self, r_to.r_im_self).convert_from_to(v, llops) -# __ None ____________________________________________________ -class NoneFrozenPBCRepr(Repr): - lowleveltype = Void - - def rtype_bool(self, hop): - return Constant(False, Bool) - - def none_call(self, hop): - raise TyperError("attempt to call constant None") - - def ll_str(self, none): - return llstr("None") - - def get_ll_eq_function(self): - return None - - def get_ll_hash_function(self): - return ll_none_hash - - rtype_simple_call = none_call - rtype_call_args = none_call - -none_frozen_pbc_repr = NoneFrozenPBCRepr() - -def ll_none_hash(_): - return 0 - - -class __extend__(pairtype(Repr, NoneFrozenPBCRepr)): - - def convert_from_to((r_from, _), v, llops): - return inputconst(Void, None) - - def rtype_is_((robj1, rnone2), hop): - from rpython.rtyper.lltypesystem.rpbc import rtype_is_None - if hop.s_result.is_constant(): - return hop.inputconst(Bool, hop.s_result.const) - return 
rtype_is_None(robj1, rnone2, hop) - -class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): - - def convert_from_to((_, r_to), v, llops): - return inputconst(r_to, None) - - def rtype_is_((rnone1, robj2), hop): - from rpython.rtyper.lltypesystem.rpbc import rtype_is_None - if hop.s_result.is_constant(): - return hop.inputconst(Bool, hop.s_result.const) - return rtype_is_None(robj2, rnone1, hop, pos=1) - # ____________________________________________________________ class AbstractClassesPBCRepr(Repr): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -936,7 +936,7 @@ # _______________________________________________________________________ # this has the side-effect of registering the unary and binary operations # and the rtyper_chooserepr() methods -from rpython.rtyper import rint, rbool, rfloat +from rpython.rtyper import rint, rbool, rfloat, rnone from rpython.rtyper import rrange from rpython.rtyper import rstr, rdict, rlist, rbytearray from rpython.rtyper import rclass, rbuiltin, rpbc From noreply at buildbot.pypy.org Thu May 29 11:00:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 11:00:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: more text in the performance section Message-ID: <20140529090034.A16D41C0385@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5280:b1b187b808d6 Date: 2014-05-29 11:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/b1b187b808d6/ Log: more text in the performance section diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1091,6 +1091,30 @@ \subsection{Performance Benchmarks\label{sec:performance-bench}} +In this section we want to look at the general performance of our +system. As explained above, the JIT does not simply speed up the +performance by a certain factor. 
In order to better understand the +behaviour of our system, we therefore split this section into two +parts. We first look at how it behaves without the JIT, and then with +the JIT. We look at six benchmarks: + +\begin{itemize} +\item \emph{btree} and \emph{skiplist}, which are both inserting, + removing, and finding elements in a data structure +\item \emph{threadworms}, which simulates worms walking on a grid in + parallel and checking for collisions with each other +\item \emph{mandelbrot}, \emph{raytrace}, and \emph{richards}, which + all perform simple, independent computations in parallel +\end{itemize} + +We use coarse-grained locking for the first three benchmarks and no +locking for the last three. This is important because Jython, which +uses fine-grained locking instead of a GIL, is only expected to scale +with the number of threads for the latter group. It is not able to +scale when using coarse-grained locking. STM, however, uses atomic +blocks instead, so it may still be able to scale since they are +implemented as simple transactions. + % To isolate factors we look at performance w/o JIT and perf w JIT. % w/o JIT: % - it scales @@ -1101,13 +1125,27 @@ % - changed working set size because generally ~50x faster % - competing w our own JIT on non-stm a challenge % - gil scales negatively -\remi{For performance we first look at no-JIT behaviour of STM. Since -we cannot compete well even with CPython, we later show JIT benchmarks -where we see the unstable performance but also that we can still scale. -(with more work we can use our STM system to parallelise jitted code -too)} See figure \ref{fig:performance-nojit} -% TODO: pypy-nostm, Jython? +\paragraph{Non-JIT benchmarks:} First we run our benchmarks on four +different interpreters: Jython (fine-grained locking), CPython (GIL), +and PyPy with STM and with the GIL (both without the JIT). The results +are shown in \ref{fig:performance-nojit}. 
+ +As expected, all interpreters with a GIL do not scale with the number +of threads. They even become slower because of the overhead of +thread-switching and GIL handling. We also see Jython scale when we +expect it to (mandelbrot, raytrace, richards), and behave similar to +the GIL interpreters in the other cases. + +PyPy using our STM system (pypy-stm-nojit) scales in all benchmarks to +a certain degree. We see that the average overhead from switching from +GIL to STM is \remi{$35.5\%$}, the maximum in richards is +\remi{$63\%$}. pypy-stm-nojit beats pypy-nojit already on two threads; +however, it never even beats CPython, the reference implementation of +Python. This means that without the JIT, our performance is not +competitive. We now look at how well our system works when we enable +the JIT. + \begin{figure}[h] \centering \includegraphics[width=1\columnwidth]{plots/performance_nojit.pdf} @@ -1115,11 +1153,20 @@ \end{figure} -% TODO: Jython, compare to cpython? or just jython as common baseline with no-jit? -\remi{Some benchmarks (figure \ref{fig:performance-jit} with enabled -JIT show that we can be competitive with the other solutions. It also -shows that more work is needed in that area to make performance more -stable.} +\paragraph{JIT benchmarks:} The speedups from enabling the JIT in +these benchmarks range from $10-50\times$. This is why we had to do +without Jython and CPython here, since they would be much further up +in the plots. Also, in order to get more stable results, we increased +the input size of all benchmarks to get reasonable execution times. + +The results are shown in \ref{fig:performance-nojit}. We see that the +performance is much less stable. There is certainly more work required +in this area. In general, we see that the group of non-locked +benchmarks certainly scales best. The other three scale barely or not +at all with the number of threads. 
The slowdown factor from GIL to STM +ranges around \remi{$1-2.4\times$} and we beat GIL performance in half +of the benchmarks. + \begin{figure}[h] \centering @@ -1127,6 +1174,14 @@ \caption{Comparing runtime between interpreters with JIT\label{fig:performance-jit}} \end{figure} + +Overall PyPy needs the JIT in order for its performance to be +competitive. It would be interesting to see how using our STM system +in CPython would perform, but it is a lot of work. On its own, our +system scales well so we hope to also see that with the JIT in the +future. + + \section{Related Work} diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index 77bf2dcfa24532e9526c848ceba949109221dd2a..91fb1a124c3de207c8b7a0781e2869c04d295d85 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/performance_nojit.pdf b/talk/dls2014/paper/plots/performance_nojit.pdf index 3f0d4de4954fcf4899ec81cc72cfac591c344941..d70024b9d2b9fa5ecc474a6fdf12f8aec8a29d36 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py --- a/talk/dls2014/paper/plots/plot_performance.py +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -32,7 +32,7 @@ interps_styles = { - "pypy-stm-jit": {'fmt':'r-'}, + "pypy-stm-jit": {'fmt':'r-', 'linewidth':2}, "pypy-jit": {'fmt':'b', 'dashes':(1,1)}, "jython": {'fmt':'m', 'dashes':(2, 5)}, "best": {'fmt':"k:"} # only fmt allowed @@ -161,8 +161,8 @@ if interp not in legend: legend[interp] = artist - legend["best"], = ax.plot(ts, [best_y] * len(ts), - interps_styles["best"]['fmt']) + # legend["best"], = ax.plot(ts, [best_y] * len(ts), + # interps_styles["best"]['fmt']) if i // w == h-1: ax.set_xlim(0, 5) @@ -174,7 +174,7 @@ return axs[w*(h-1)].legend(tuple(legend.values()), tuple(legend.keys()), ncol=4, - loc=(0,-0.4)) + loc=(-0.15,-0.5)) def main(): diff --git a/talk/dls2014/paper/plots/plot_performance_nojit.py 
b/talk/dls2014/paper/plots/plot_performance_nojit.py --- a/talk/dls2014/paper/plots/plot_performance_nojit.py +++ b/talk/dls2014/paper/plots/plot_performance_nojit.py @@ -30,8 +30,9 @@ interps_styles = { - "pypy-stm-nojit": {'fmt':'r-'}, + "pypy-stm-nojit": {'fmt':'r-', 'linewidth':2}, "cpython": {'fmt':'b', 'dashes':(1,1)}, + "pypy-nojit": {'fmt':'g', 'dashes':(5, 2)}, "jython": {'fmt':'m', 'dashes':(2, 5)}, "best": {'fmt':"k:"} # only fmt allowed } @@ -53,10 +54,16 @@ [2.84] ], "jython":[ - [2.74,2.75], - [2.9,3.1,3.0], - [2.89,3.01,2.95], - [3.0,2.99,2.97] + [2.95,2.95,2.96], + [1.65,1.68,1.54], + [1.2,1.15,1.3,1.3], + [1.09,0.9,0.97,0.99,1.03] + ], + "pypy-nojit":[ + [5.5,5.7,5.8], + [7,6.97], + [6.68,6.77], + [6.4,6.4] ]}, "btree":{ @@ -77,6 +84,12 @@ [2.60,2.46,2.6], [2.56,2.6,2.51], [2.57,2.52,2.48] + ], + "pypy-nojit":[ + [6.63,6.73], + [10.6,10.5], + [11.4,11.4], + [12.0,12.3] ]}, "skiplist":{ @@ -97,6 +110,12 @@ [1.8,1.77,1.81], [1.81,1.79,1.88], [1.99,1.92,1.74,1.84] + ], + "pypy-nojit":[ + [4.9,4.8,4.6,4.7], + [6.87,7.53,6.64], + [7.74,7.3,7.35], + [7.38,7.28,7.31,7.54] ]}, "threadworms":{ @@ -117,6 +136,12 @@ [3.0,2.87,3.3,3.1], [3.35,3.22,3.19], [3.19,3.37,3.26,3.36] + ], + "pypy-nojit":[ + [4.49,4.36], + [7.86,7.81], + [8.76,8.73], + [9.23,9.27] ]}, "mandelbrot":{ @@ -137,11 +162,17 @@ [2.84,3,2.8,2.96], [2.13,2.03,2.04,2.11], [1.8,1.74,1.8,1.88] + ], + "pypy-nojit":[ + [3.67,3.54], + [4.53,4.82,4.75], + [4.14,4.23], + [4.38,4.23] ]}, "richards":{ "pypy-stm-nojit":[ - [11.2], + [10.7], [6.1], [5.4,4.9], [4.8,4.9,5] @@ -157,9 +188,23 @@ [2.32,1.95,2.18], [1.86,1.66], [1.49,1.63,1.59] + ], + "pypy-nojit":[ + [6.6,6.5], + [7.98,7.98], + [7.56,7.33], + [7.05,7.28] ]} } +import numpy as np +sls = [] +for bench_name, interps in benchs.items(): + slowdown = np.mean(interps["pypy-stm-nojit"][0]) / np.mean(interps["pypy-nojit"][0]) + print "overhead", bench_name, ":", slowdown + sls.append(slowdown) +print "avg,max slowdown of STM", np.mean(sls), np.max(sls) 
+ From noreply at buildbot.pypy.org Thu May 29 11:27:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 11:27:38 +0200 (CEST) Subject: [pypy-commit] benchmarks default: be fair to jython Message-ID: <20140529092738.AB1341D2837@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r263:a26f2fb58413 Date: 2014-05-29 11:27 +0200 http://bitbucket.org/pypy/benchmarks/changeset/a26f2fb58413/ Log: be fair to jython diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -7,10 +7,19 @@ print_abort_info, hint_commit_soon) import time +import platform +if platform.python_implementation() == "Jython": + # be fair to jython and don't use a lock where none is required: + class fakeatomic: + def __enter__(self): + pass + def __exit__(self,*args): + pass + atomic = fakeatomic() + + AMBIENT = 0.1 - - class Vector(object): def __init__(self,x,y,z): self.x = x From noreply at buildbot.pypy.org Thu May 29 12:25:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 May 2014 12:25:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Attempt at fixing the logic in c84e52937998 Message-ID: <20140529102559.4556F1C33B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71774:0ce7b860140b Date: 2014-05-29 12:25 +0200 http://bitbucket.org/pypy/pypy/changeset/0ce7b860140b/ Log: Attempt at fixing the logic in c84e52937998 diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -366,7 +366,11 @@ procedure_token = cell.get_procedure_token() if procedure_token is None: # it was an aborted compilation, or maybe a weakref that - # has been freed + # has been freed. 
Add the flag "JC_TEMPORARY" in case + # cleanup_chain() doesn't unlink the cell (stm only), so + # that the next time we'll count ticks and eventually + # enter bound_reached() again. + cell.flags |= JC_TEMPORARY jitcounter.cleanup_chain(hash) return if not confirm_enter_jit(*args): From noreply at buildbot.pypy.org Thu May 29 12:36:41 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 12:36:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc 70c403598485 Message-ID: <20140529103641.18B561C33B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71775:f0c0a7378c89 Date: 2014-05-29 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/f0c0a7378c89/ Log: import stmgc 70c403598485 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -210,9 +210,7 @@ STM_PSEGMENT->start_time = tl->_timing_cur_start; STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; -#ifndef NDEBUG - STM_PSEGMENT->marker_inev[1] = 99999999999999999L; -#endif + STM_PSEGMENT->marker_inev[1] = 0; if (jmpbuf == NULL) marker_fetch_inev(); STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR @@ -482,6 +480,9 @@ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; + /* marker_inev is not needed anymore */ + STM_PSEGMENT->marker_inev[1] = 0; + /* reset these lists to NULL for the next transaction */ LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -79,9 +79,7 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. 
This is used to track the STM status: they are old objects that where written to and - that need to be copied to other segments upon commit. Note that - every object takes three list items: the object, and two words for - the location marker. */ + that need to be copied to other segments upon commit. */ struct list_s *modified_old_objects; /* For each entry in 'modified_old_objects', we have two entries diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -432,7 +432,7 @@ for (i = list_count(lst); i > 0; i -= 2) { mark_visit_object((object_t *)list_item(lst, i - 1), base); } - if (get_priv_segment(j)->transaction_state == TS_INEVITABLE) { + if (get_priv_segment(j)->marker_inev[1]) { uintptr_t marker_inev_obj = get_priv_segment(j)->marker_inev[1]; mark_visit_object((object_t *)marker_inev_obj, base); } @@ -483,7 +483,22 @@ static inline bool largemalloc_keep_object_at(char *data) { /* this is called by _stm_largemalloc_sweep() */ - return mark_visited_test_and_clear((object_t *)(data - stm_object_pages)); + object_t *obj = (object_t *)(data - stm_object_pages); + if (!mark_visited_test_and_clear(obj)) { +#ifndef NDEBUG + /* This is actually needed in order to avoid random write-read + conflicts with objects read and freed long in the past. Still, + it is probably rare enough so that we don't need this additional + overhead. 
(test_random hits it sometimes) */ + long i; + for (i = 1; i <= NB_SEGMENTS; i++) { + ((struct stm_read_marker_s *) + (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; + } +#endif + return false; + } + return true; } static void sweep_large_objects(void) diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -219,9 +219,9 @@ acquire_privatization_lock(); synchronize_object_now(obj); release_privatization_lock(); + } else { + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } - else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } /* the list could have moved while appending */ @@ -245,7 +245,7 @@ for (i = num_old + 1; i < total; i += 2) { minor_trace_if_young((object_t **)list_ptr_to_item(mlst, i)); } - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + if (STM_PSEGMENT->marker_inev[1]) { uintptr_t *pmarker_inev_obj = (uintptr_t *) REAL_ADDRESS(STM_SEGMENT->segment_base, &STM_PSEGMENT->marker_inev[1]); @@ -281,6 +281,10 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); + /* mark slot as unread */ + ((struct stm_read_marker_s *) + (pseg->pub.segment_base + (item->addr >> 4)))->rm = 0; + _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; @@ -342,8 +346,9 @@ collect_modified_old_objects(); num_old = 0; } - else + else { num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; + } collect_roots_from_markers(num_old); From noreply at buildbot.pypy.org Thu May 29 12:36:42 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 12:36:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: hrm Message-ID: <20140529103642.868641C33B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r71776:d732fdec3f60 Date: 2014-05-29 12:35 +0200 
http://bitbucket.org/pypy/pypy/changeset/d732fdec3f60/ Log: hrm diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -3b302406acd8 +70c403598485 From noreply at buildbot.pypy.org Thu May 29 12:37:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 May 2014 12:37:53 +0200 (CEST) Subject: [pypy-commit] stmgc default: Always enable this logic: we don't want any random false conflict at Message-ID: <20140529103753.E2B751C33B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1247:9164eaf02d1a Date: 2014-05-28 17:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/9164eaf02d1a/ Log: Always enable this logic: we don't want any random false conflict at all, even if they are rare, until we are sure about the trade-offs involved. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -484,17 +484,15 @@ /* this is called by _stm_largemalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); if (!mark_visited_test_and_clear(obj)) { -#ifndef NDEBUG /* This is actually needed in order to avoid random write-read - conflicts with objects read and freed long in the past. Still, - it is probably rare enough so that we don't need this additional - overhead. (test_random hits it sometimes) */ + conflicts with objects read and freed long in the past. + It is probably rare enough, but still, we want to avoid any + false conflict. 
(test_random hits it sometimes) */ long i; for (i = 1; i <= NB_SEGMENTS; i++) { ((struct stm_read_marker_s *) (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; } -#endif return false; } return true; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -280,7 +280,8 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { assert(!_is_in_nursery((object_t *)item->addr)); - /* mark slot as unread */ + /* mark slot as unread (it can only have the read marker + in this segment) */ ((struct stm_read_marker_s *) (pseg->pub.segment_base + (item->addr >> 4)))->rm = 0; From noreply at buildbot.pypy.org Thu May 29 14:42:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 14:42:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: just some new/more numbers Message-ID: <20140529124238.113041C0109@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5281:9f7e6efe378e Date: 2014-05-29 14:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/9f7e6efe378e/ Log: just some new/more numbers diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py --- a/talk/dls2014/paper/plots/plot_performance.py +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -44,7 +44,7 @@ [3.91, 3.87], [2.53, 2.52], [2.23], - [2.46, 2.6] + [2.46, 2.6,2.76,2.51] ], "pypy-jit":[ [1.6], @@ -58,7 +58,7 @@ [1.68], [1.3], [1.39], - [1.66] + [1.66,1.67,1.68,1.66] ], "pypy-jit":[ [1.6], @@ -83,10 +83,10 @@ "threadworms":{ "pypy-stm-jit":[ - [4.23], - [3.4], - [3.16], - [3.4, 3.3] + [4.23,4.33], + [3.4,3.34,3.39], + [3.16,2.96,3.5,2.9,3.3], + [3.4, 3.3,3.32,3.86] ], "pypy-jit":[ [4.14], @@ -97,10 +97,10 @@ "mandelbrot":{ "pypy-stm-jit":[ - [18.5], - [9.9], - [8.4], - [7.2] + [17.87,17.88,17.88], + [9.4,9.42,9.34], + [7.75,7.8,8.2,7.7], + [6.8,6.55,6.9,6.7] ], "pypy-jit":[ [13.5], From noreply at buildbot.pypy.org Thu May 29 15:35:44 2014 From: noreply at 
buildbot.pypy.org (Raemi) Date: Thu, 29 May 2014 15:35:44 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <20140529133544.2CEF11C141C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5282:c8099a66025a Date: 2014-05-29 15:35 +0200 http://bitbucket.org/pypy/extradoc/changeset/c8099a66025a/ Log: tweaks diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -966,7 +966,7 @@ because we can minimise non-determinism. We also do not want to depend on the capabilities of the JIT in these experiments. -We performed all benchmarks on a machine with a Intel Core i7-4770 +We performed all benchmarks on a machine with an Intel Core i7-4770 CPU~@3.40GHz (4 cores, 8 threads). There are 16~GiB of memory available and we ran them under Ubuntu 14.04 with a Linux 3.13.0 kernel. The STM system was compiled with a number of segments $N=4$ @@ -1002,6 +1002,13 @@ (incl. the sharing segment). During major collections we re-share the pages if possible. +\begin{figure}[h] + \centering + \includegraphics[width=1\columnwidth]{plots/richards_mem.pdf} + \caption{Actual memory managed by the GC and the page privatisation + over time in Richards benchmark\label{fig:richards_mem}} +\end{figure} + In figure \ref{fig:richards_mem} we look at the memory usage of one of our benchmarks called Richards\footnote{OS kernel simulation benchmark}. The \emph{GC managed memory} counts all memory used in the @@ -1034,18 +1041,11 @@ duplicate any data structures like e.g. the Nursery for each thread. This, the missing optimisation, and the additional memory requirements for STM explained above account for this difference. -We expect to improve this aspect in the future, in this paper we +We expect to improve this aspect in the future; in this paper we want to focus first on performance. \remi{I don't know how much sense it makes to go deeper. 
We will improve this in the future, but right now this is the overall picture.} -\begin{figure}[h] - \centering - \includegraphics[width=1\columnwidth]{plots/richards_mem.pdf} - \caption{Actual memory managed by the GC and the page privatisation - over time in Richards benchmark\label{fig:richards_mem}} -\end{figure} - \subsection{Overhead Breakdown} @@ -1065,7 +1065,8 @@ \subsection{Scaling} To asses how well the STM system scales on its own (without any real -workload), we execute the following loop on 1 to 4 threads: +workload), we execute the following loop on 1 to 4 threads on our +Python interpreter with STM: \begin{lstlisting} def workload(): i = 20000000 @@ -1073,11 +1074,13 @@ i -= 1 \end{lstlisting} -For the results in figure \ref{fig:scaling}, we averaged -over 5 runs and normalised the average runtimes to the -time it took on a single thread. From this we see that there -is additional overhead introduced by each thread ($13\%$ -for all 4 threads together). +For the results in figure \ref{fig:scaling}, we averaged over 5 runs +and normalised the average runtimes to the time it took on a single +thread. From this we see that there is additional overhead introduced +by each thread (\remi{$13\%$} for all 4 threads together). While this +is not ideal, we think that \remi{$13\%$} are acceptable on four +threads. In terms of throughput, 4 threads have \remi{$3.53\times$} +more iterations per second than a single thread. 
\remi{what we don't show is by how much this overhead is influenced by allocations} diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index 91fb1a124c3de207c8b7a0781e2869c04d295d85..fc79357f425d91a5bb8d7139429603cec7458407 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_scaling.py b/talk/dls2014/paper/plots/plot_scaling.py --- a/talk/dls2014/paper/plots/plot_scaling.py +++ b/talk/dls2014/paper/plots/plot_scaling.py @@ -40,9 +40,9 @@ print y ax.errorbar(xs, y, yerr=yerr, - label="STM") + label="pypy-stm-nojit") ax.plot(xs, opt_y, label="optimal") - return ax.legend(loc=4) + return ax.legend(loc=0) def main(): @@ -55,7 +55,7 @@ ax.set_ylabel("Runtime normalized to 1 thread") ax.set_xlabel("Threads") - ax.set_ylim(0, 1.5) + ax.set_ylim(0.9, 1.3) ax.set_xlim(0, 5) legend = plot_mems(ax) @@ -87,7 +87,7 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Plot stm log files') - parser.add_argument('--figure-size', default='6x4', + parser.add_argument('--figure-size', default='6x3', help='set figure size in inches: format=6x4') parser.add_argument('--font-size', default='10.0', help='set font size in pts: 10.0') diff --git a/talk/dls2014/paper/plots/scaling.pdf b/talk/dls2014/paper/plots/scaling.pdf index 3b5b43b271d3c03d3edf230a38d24389b7bfdb6c..04b19a058dc3bc65177d65132dbdb9ee2d1e2c3d GIT binary patch [cut] From noreply at buildbot.pypy.org Thu May 29 19:33:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 May 2014 19:33:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset 8faa5a81be74 Message-ID: <20140529173324.80EEA1C3448@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71777:5c276051bf44 Date: 2014-05-29 19:32 +0200 http://bitbucket.org/pypy/pypy/changeset/5c276051bf44/ Log: Backed out changeset 8faa5a81be74 Forgot to add rnone.py, making imports break. 
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -160,6 +160,8 @@ def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) + if not descs: + return family = descs[0].getcallfamily() s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -486,7 +486,7 @@ assert False def specialize_call(self, hop): - from rpython.rtyper.rnone import NoneRepr + from rpython.rtyper import rpbc PTR = hop.r_result.lowleveltype if isinstance(PTR, lltype.Ptr): T = lltype.Ptr @@ -496,7 +496,7 @@ assert False hop.exception_cannot_occur() - if isinstance(hop.args_r[1], NoneRepr): + if isinstance(hop.args_r[1], rpbc.NoneFrozenPBCRepr): return hop.inputconst(PTR, null) v_arg = hop.inputarg(hop.args_r[1], arg=1) assert isinstance(v_arg.concretetype, T) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -7,15 +7,39 @@ from rpython.rtyper.lltypesystem import rclass, llmemory from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) -from rpython.rtyper.rmodel import Repr, inputconst +from rpython.rtyper.rmodel import Repr, TyperError, inputconst from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, - SingleFrozenPBCRepr, get_concrete_calltable) + SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, + get_concrete_calltable) from rpython.rtyper.typesystem import getfunctionptr from 
rpython.tool.pairtype import pairtype +def rtype_is_None(robj1, rnone2, hop, pos=0): + if isinstance(robj1.lowleveltype, Ptr): + v1 = hop.inputarg(robj1, pos) + return hop.genop('ptr_iszero', [v1], resulttype=Bool) + elif robj1.lowleveltype == llmemory.Address: + v1 = hop.inputarg(robj1, pos) + cnull = hop.inputconst(llmemory.Address, robj1.null_instance()) + return hop.genop('adr_eq', [v1, cnull], resulttype=Bool) + elif robj1 == none_frozen_pbc_repr: + return hop.inputconst(Bool, True) + elif isinstance(robj1, SmallFunctionSetPBCRepr): + if robj1.s_pbc.can_be_None: + v1 = hop.inputarg(robj1, pos) + return hop.genop('char_eq', [v1, inputconst(Char, '\000')], + resulttype=Bool) + else: + return inputconst(Bool, False) + else: + raise TyperError('rtype_is_None of %r' % (robj1)) + + +# ____________________________________________________________ + class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): """Representation selected for multiple non-callable pre-built constants.""" def __init__(self, rtyper, access_set): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -4,8 +4,9 @@ from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import typeOf, Void +from rpython.rtyper.lltypesystem.lltype import typeOf, Void, Bool from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, inputdesc, warning, impossible_repr) from rpython.tool.pairtype import pair, pairtype @@ -22,7 +23,8 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, - SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr) + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, + MethodOfFrozenPBCRepr) kind = 
self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -59,6 +61,13 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t +class __extend__(annmodel.SomeNone): + def rtyper_makerepr(self, rtyper): + return none_frozen_pbc_repr + + def rtyper_makekey(self): + return self.__class__, + # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -580,6 +589,56 @@ def convert_from_to((r_from, r_to), v, llops): return pair(r_from.r_im_self, r_to.r_im_self).convert_from_to(v, llops) +# __ None ____________________________________________________ +class NoneFrozenPBCRepr(Repr): + lowleveltype = Void + + def rtype_bool(self, hop): + return Constant(False, Bool) + + def none_call(self, hop): + raise TyperError("attempt to call constant None") + + def ll_str(self, none): + return llstr("None") + + def get_ll_eq_function(self): + return None + + def get_ll_hash_function(self): + return ll_none_hash + + rtype_simple_call = none_call + rtype_call_args = none_call + +none_frozen_pbc_repr = NoneFrozenPBCRepr() + +def ll_none_hash(_): + return 0 + + +class __extend__(pairtype(Repr, NoneFrozenPBCRepr)): + + def convert_from_to((r_from, _), v, llops): + return inputconst(Void, None) + + def rtype_is_((robj1, rnone2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None + if hop.s_result.is_constant(): + return hop.inputconst(Bool, hop.s_result.const) + return rtype_is_None(robj1, rnone2, hop) + +class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): + + def convert_from_to((_, r_to), v, llops): + return inputconst(r_to, None) + + def rtype_is_((rnone1, robj2), hop): + from rpython.rtyper.lltypesystem.rpbc import rtype_is_None + if hop.s_result.is_constant(): + return hop.inputconst(Bool, hop.s_result.const) + return rtype_is_None(robj2, rnone1, hop, pos=1) + # ____________________________________________________________ class AbstractClassesPBCRepr(Repr): diff --git 
a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -936,7 +936,7 @@ # _______________________________________________________________________ # this has the side-effect of registering the unary and binary operations # and the rtyper_chooserepr() methods -from rpython.rtyper import rint, rbool, rfloat, rnone +from rpython.rtyper import rint, rbool, rfloat from rpython.rtyper import rrange from rpython.rtyper import rstr, rdict, rlist, rbytearray from rpython.rtyper import rclass, rbuiltin, rpbc From noreply at buildbot.pypy.org Thu May 29 19:40:48 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 29 May 2014 19:40:48 +0200 (CEST) Subject: [pypy-commit] pypy default: redo commit 8faa5a81be74 properly Message-ID: <20140529174048.C71591C141C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71778:62fe68e481ea Date: 2014-05-29 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/62fe68e481ea/ Log: redo commit 8faa5a81be74 properly diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -160,8 +160,6 @@ def consider_call_site_for_pbc(self, s_callable, args, s_result, call_op): descs = list(s_callable.descriptions) - if not descs: - return family = descs[0].getcallfamily() s_callable.getKind().consider_call_site(self, family, descs, args, s_result, call_op) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -486,7 +486,7 @@ assert False def specialize_call(self, hop): - from rpython.rtyper import rpbc + from rpython.rtyper.rnone import NoneRepr PTR = hop.r_result.lowleveltype if isinstance(PTR, lltype.Ptr): T = lltype.Ptr @@ -496,7 +496,7 @@ assert False hop.exception_cannot_occur() - if isinstance(hop.args_r[1], rpbc.NoneFrozenPBCRepr): + if isinstance(hop.args_r[1], NoneRepr): 
return hop.inputconst(PTR, null) v_arg = hop.inputarg(hop.args_r[1], arg=1) assert isinstance(v_arg.concretetype, T) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -7,39 +7,15 @@ from rpython.rtyper.lltypesystem import rclass, llmemory from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) -from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, - SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, - get_concrete_calltable) + SingleFrozenPBCRepr, get_concrete_calltable) from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pairtype -def rtype_is_None(robj1, rnone2, hop, pos=0): - if isinstance(robj1.lowleveltype, Ptr): - v1 = hop.inputarg(robj1, pos) - return hop.genop('ptr_iszero', [v1], resulttype=Bool) - elif robj1.lowleveltype == llmemory.Address: - v1 = hop.inputarg(robj1, pos) - cnull = hop.inputconst(llmemory.Address, robj1.null_instance()) - return hop.genop('adr_eq', [v1, cnull], resulttype=Bool) - elif robj1 == none_frozen_pbc_repr: - return hop.inputconst(Bool, True) - elif isinstance(robj1, SmallFunctionSetPBCRepr): - if robj1.s_pbc.can_be_None: - v1 = hop.inputarg(robj1, pos) - return hop.genop('char_eq', [v1, inputconst(Char, '\000')], - resulttype=Bool) - else: - return inputconst(Bool, False) - else: - raise TyperError('rtype_is_None of %r' % (robj1)) - - -# ____________________________________________________________ - class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): """Representation selected for multiple non-callable 
pre-built constants.""" def __init__(self, rtyper, access_set): diff --git a/rpython/rtyper/rnone.py b/rpython/rtyper/rnone.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/rnone.py @@ -0,0 +1,82 @@ +from rpython.flowspace.model import Constant +from rpython.annotator.model import SomeNone +from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.lltypesystem.lltype import Void, Bool, Ptr, Char +from rpython.rtyper.lltypesystem.llmemory import Address +from rpython.rtyper.lltypesystem.rpbc import SmallFunctionSetPBCRepr +from rpython.rtyper.annlowlevel import llstr +from rpython.tool.pairtype import pairtype + +class NoneRepr(Repr): + lowleveltype = Void + + def rtype_bool(self, hop): + return Constant(False, Bool) + + def none_call(self, hop): + raise TyperError("attempt to call constant None") + + def ll_str(self, none): + return llstr("None") + + def get_ll_eq_function(self): + return None + + def get_ll_hash_function(self): + return ll_none_hash + + rtype_simple_call = none_call + rtype_call_args = none_call + +none_repr = NoneRepr() + +class __extend__(SomeNone): + def rtyper_makerepr(self, rtyper): + return none_repr + + def rtyper_makekey(self): + return self.__class__, + +def ll_none_hash(_): + return 0 + + +class __extend__(pairtype(Repr, NoneRepr)): + + def convert_from_to((r_from, _), v, llops): + return inputconst(Void, None) + + def rtype_is_((robj1, rnone2), hop): + if hop.s_result.is_constant(): + return hop.inputconst(Bool, hop.s_result.const) + return rtype_is_None(robj1, rnone2, hop) + +class __extend__(pairtype(NoneRepr, Repr)): + + def convert_from_to((_, r_to), v, llops): + return inputconst(r_to, None) + + def rtype_is_((rnone1, robj2), hop): + if hop.s_result.is_constant(): + return hop.inputconst(Bool, hop.s_result.const) + return rtype_is_None(robj2, rnone1, hop, pos=1) + +def rtype_is_None(robj1, rnone2, hop, pos=0): + if isinstance(robj1.lowleveltype, Ptr): + v1 = hop.inputarg(robj1, pos) + return 
hop.genop('ptr_iszero', [v1], resulttype=Bool) + elif robj1.lowleveltype == Address: + v1 = hop.inputarg(robj1, pos) + cnull = hop.inputconst(Address, robj1.null_instance()) + return hop.genop('adr_eq', [v1, cnull], resulttype=Bool) + elif robj1 == none_repr: + return hop.inputconst(Bool, True) + elif isinstance(robj1, SmallFunctionSetPBCRepr): + if robj1.s_pbc.can_be_None: + v1 = hop.inputarg(robj1, pos) + return hop.genop('char_eq', [v1, inputconst(Char, '\000')], + resulttype=Bool) + else: + return inputconst(Bool, False) + else: + raise TyperError('rtype_is_None of %r' % (robj1)) diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -4,9 +4,8 @@ from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, callparse -from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import typeOf, Void, Bool +from rpython.rtyper.lltypesystem.lltype import typeOf, Void from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, inputdesc, warning, impossible_repr) from rpython.tool.pairtype import pair, pairtype @@ -23,8 +22,7 @@ class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): from rpython.rtyper.lltypesystem.rpbc import (FunctionsPBCRepr, - SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr, - MethodOfFrozenPBCRepr) + SmallFunctionSetPBCRepr, ClassesPBCRepr, MethodsPBCRepr) kind = self.getKind() if issubclass(kind, description.FunctionDesc): sample = self.any_description() @@ -61,13 +59,6 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t -class __extend__(annmodel.SomeNone): - def rtyper_makerepr(self, rtyper): - return none_frozen_pbc_repr - - def rtyper_makekey(self): - return self.__class__, - # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -589,56 +580,6 
@@ def convert_from_to((r_from, r_to), v, llops): return pair(r_from.r_im_self, r_to.r_im_self).convert_from_to(v, llops) -# __ None ____________________________________________________ -class NoneFrozenPBCRepr(Repr): - lowleveltype = Void - - def rtype_bool(self, hop): - return Constant(False, Bool) - - def none_call(self, hop): - raise TyperError("attempt to call constant None") - - def ll_str(self, none): - return llstr("None") - - def get_ll_eq_function(self): - return None - - def get_ll_hash_function(self): - return ll_none_hash - - rtype_simple_call = none_call - rtype_call_args = none_call - -none_frozen_pbc_repr = NoneFrozenPBCRepr() - -def ll_none_hash(_): - return 0 - - -class __extend__(pairtype(Repr, NoneFrozenPBCRepr)): - - def convert_from_to((r_from, _), v, llops): - return inputconst(Void, None) - - def rtype_is_((robj1, rnone2), hop): - from rpython.rtyper.lltypesystem.rpbc import rtype_is_None - if hop.s_result.is_constant(): - return hop.inputconst(Bool, hop.s_result.const) - return rtype_is_None(robj1, rnone2, hop) - -class __extend__(pairtype(NoneFrozenPBCRepr, Repr)): - - def convert_from_to((_, r_to), v, llops): - return inputconst(r_to, None) - - def rtype_is_((rnone1, robj2), hop): - from rpython.rtyper.lltypesystem.rpbc import rtype_is_None - if hop.s_result.is_constant(): - return hop.inputconst(Bool, hop.s_result.const) - return rtype_is_None(robj2, rnone1, hop, pos=1) - # ____________________________________________________________ class AbstractClassesPBCRepr(Repr): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -936,7 +936,7 @@ # _______________________________________________________________________ # this has the side-effect of registering the unary and binary operations # and the rtyper_chooserepr() methods -from rpython.rtyper import rint, rbool, rfloat +from rpython.rtyper import rint, rbool, rfloat, rnone from rpython.rtyper import rrange from 
rpython.rtyper import rstr, rdict, rlist, rbytearray from rpython.rtyper import rclass, rbuiltin, rpbc From noreply at buildbot.pypy.org Thu May 29 19:51:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 29 May 2014 19:51:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix noConst(): recent improvements to heapcache mean this hack no longer Message-ID: <20140529175133.B64811C141C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71779:94a0df43071c Date: 2014-05-29 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/94a0df43071c/ Log: Fix noConst(): recent improvements to heapcache mean this hack no longer works. Use instead some direct way. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -561,8 +561,10 @@ return [SpaceOperation('-live-', [], None), op1, None] if hints.get('force_virtualizable'): return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) - else: - log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) + if hints.get('force_no_const'): # for tests only + assert getkind(op.args[0].concretetype) == 'int' + return SpaceOperation('int_same_as', [op.args[0]], op.result) + log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): d = op.args[1].value.copy() diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -380,6 +380,10 @@ # ---------- + @arguments("i", returns="i") + def bhimpl_int_same_as(a): + return a + @arguments("i", "i", returns="i") def bhimpl_int_add(a, b): return intmask(a + b) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -231,6 +231,14 @@ ''' % (_opimpl, 
_opimpl.upper())).compile() @arguments("box") + def opimpl_int_same_as(self, box): + # for tests only: emits a same_as, forcing the result to be in a Box + resbox = history.BoxInt(box.getint()) + self.metainterp._record_helper_nonpure_varargs( + rop.SAME_AS, resbox, None, [box]) + return resbox + + @arguments("box") def opimpl_ptr_nonzero(self, box): return self.execute(rop.PTR_NE, box, history.CONST_NULL) diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -273,6 +273,5 @@ def noConst(x): """Helper function for tests, returning 'x' as a BoxInt/BoxPtr even if it is a ConstInt/ConstPtr.""" - f1 = _Foo(); f2 = _Foo() - f1.x = x; f2.x = 0 - return f1.x + from rpython.rlib import jit + return jit.hint(x, force_no_const=True) From noreply at buildbot.pypy.org Thu May 29 21:23:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 29 May 2014 21:23:32 +0200 (CEST) Subject: [pypy-commit] pypy py3k: accommodate struct.error instead of TypeError now Message-ID: <20140529192332.3D6641C0385@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71780:f4c632903a3d Date: 2014-05-29 12:21 -0700 http://bitbucket.org/pypy/pypy/changeset/f4c632903a3d/ Log: accommodate struct.error instead of TypeError now diff --git a/lib-python/3/test/test_struct.py b/lib-python/3/test/test_struct.py --- a/lib-python/3/test/test_struct.py +++ b/lib-python/3/test/test_struct.py @@ -524,7 +524,8 @@ if IS32BIT: def test_crasher(self): # Pypy catches the string error instead of calculating the entire size - self.assertRaises((MemoryError, TypeError), struct.pack, "357913941b", "a") + self.assertRaises((MemoryError, struct.error), + struct.pack, "357913941b", "a") def test_trailing_counter(self): store = array.array('b', b' '*100) From noreply at buildbot.pypy.org Thu May 29 21:23:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: 
Thu, 29 May 2014 21:23:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix space.hash to properly handle 'longs' Message-ID: <20140529192333.941681C0385@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71781:9b8b9359ea33 Date: 2014-05-29 12:22 -0700 http://bitbucket.org/pypy/pypy/changeset/9b8b9359ea33/ Log: fix space.hash to properly handle 'longs' diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -433,15 +433,17 @@ raise oefmt(space.w_TypeError, "'%T' objects are unhashable", w_obj) w_result = space.get_and_call_function(w_hash, w_obj) - w_resulttype = space.type(w_result) - if space.is_w(w_resulttype, space.w_int): + if not space.isinstance_w(w_result, space.w_int): + raise oefmt(space.w_TypeError, + "__hash__ method should return an integer") + + from pypy.objspace.std.intobject import W_IntObject + if type(w_result) is W_IntObject: return w_result - elif space.isinstance_w(w_result, space.w_int): - # be careful about subclasses of 'int'... 
+ elif isinstance(w_result, W_IntObject): return space.wrap(space.int_w(w_result)) - else: - raise OperationError(space.w_TypeError, - space.wrap("__hash__() should return an int")) + # a non W_IntObject int, assume long-like + return w_result.descr_hash(space) def userdel(space, w_obj): w_del = space.lookup(w_obj, '__del__') diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -715,6 +715,17 @@ return CannotConvertToBool() x = X() raises(MyError, "'foo' in x") + + def test_64bit_hash(self): + import sys + class BigHash(object): + def __hash__(self): + return sys.maxsize + 2 + def __eq__(self, other): + return isinstance(other, BigHash) + # previously triggered an OverflowError + d = {BigHash(): None} + assert BigHash() in d class AppTestWithBuiltinShortcut(AppTest_Descroperation): From noreply at buildbot.pypy.org Thu May 29 21:23:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 29 May 2014 21:23:35 +0200 (CEST) Subject: [pypy-commit] pypy py3k: forgot to re-enable set's ByteStrategy, fix Message-ID: <20140529192335.0D3A41C0385@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71782:3e6c1e7aaacb Date: 2014-05-29 12:22 -0700 http://bitbucket.org/pypy/pypy/changeset/3e6c1e7aaacb/ Log: forgot to re-enable set's ByteStrategy, fix diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1584,8 +1584,6 @@ return # check for strings - # XXX: - """ for w_item in iterable_w: if type(w_item) is not W_BytesObject: break @@ -1593,7 +1591,6 @@ w_set.strategy = space.fromcache(BytesSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return - """ # check for unicode for w_item in iterable_w: From noreply at buildbot.pypy.org Thu May 29 21:43:56 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Thu, 29 May 2014 21:43:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hopefully fix translation Message-ID: <20140529194356.14E5F1C0109@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71783:808717088f2a Date: 2014-05-29 12:43 -0700 http://bitbucket.org/pypy/pypy/changeset/808717088f2a/ Log: hopefully fix translation diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -437,12 +437,14 @@ raise oefmt(space.w_TypeError, "__hash__ method should return an integer") - from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.intobject import ( + W_AbstractIntObject, W_IntObject) if type(w_result) is W_IntObject: return w_result elif isinstance(w_result, W_IntObject): return space.wrap(space.int_w(w_result)) # a non W_IntObject int, assume long-like + assert isinstance(w_result, W_AbstractIntObject) return w_result.descr_hash(space) def userdel(space, w_obj): From noreply at buildbot.pypy.org Thu May 29 23:13:37 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 May 2014 23:13:37 +0200 (CEST) Subject: [pypy-commit] pypy default: close files opened during import (upstream issue 21610 created and patch submitted) Message-ID: <20140529211337.CD3C01C0109@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71784:650405891542 Date: 2014-05-30 00:13 +0300 http://bitbucket.org/pypy/pypy/changeset/650405891542/ Log: close files opened during import (upstream issue 21610 created and patch submitted) diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py --- a/lib-python/2.7/imputil.py +++ b/lib-python/2.7/imputil.py @@ -422,7 +422,8 @@ saved back to the filesystem for future imports. The source file's modification timestamp must be provided as a Long value. 
""" - codestring = open(pathname, 'rU').read() + with open(pathname, 'rU') as fp: + codestring = fp.read() if codestring and codestring[-1] != '\n': codestring = codestring + '\n' code = __builtin__.compile(codestring, pathname, 'exec') @@ -603,8 +604,8 @@ self.desc = desc def import_file(self, filename, finfo, fqname): - fp = open(filename, self.desc[1]) - module = imp.load_module(fqname, fp, filename, self.desc) + with open(filename, self.desc[1]) as fp: + module = imp.load_module(fqname, fp, filename, self.desc) module.__file__ = filename return 0, module, { } diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py --- a/lib-python/2.7/modulefinder.py +++ b/lib-python/2.7/modulefinder.py @@ -109,16 +109,16 @@ def run_script(self, pathname): self.msg(2, "run_script", pathname) - fp = open(pathname, READ_MODE) - stuff = ("", "r", imp.PY_SOURCE) - self.load_module('__main__', fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = ("", "r", imp.PY_SOURCE) + self.load_module('__main__', fp, pathname, stuff) def load_file(self, pathname): dir, name = os.path.split(pathname) name, ext = os.path.splitext(name) - fp = open(pathname, READ_MODE) - stuff = (ext, "r", imp.PY_SOURCE) - self.load_module(name, fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = (ext, "r", imp.PY_SOURCE) + self.load_module(name, fp, pathname, stuff) def import_hook(self, name, caller=None, fromlist=None, level=-1): self.msg(3, "import_hook", name, caller, fromlist, level) @@ -461,6 +461,8 @@ fp, buf, stuff = self.find_module("__init__", m.__path__) self.load_module(fqname, fp, buf, stuff) self.msgout(2, "load_package ->", m) + if fp: + fp.close() return m def add_module(self, fqname): From noreply at buildbot.pypy.org Fri May 30 14:45:56 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 May 2014 14:45:56 +0200 (CEST) Subject: [pypy-commit] pypy default: close files opened by FileType option handling, there must be a better way? 
Message-ID: <20140530124556.5CCDF1C06C0@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r71785:c57ac1c3539e Date: 2014-05-30 15:44 +0300 http://bitbucket.org/pypy/pypy/changeset/c57ac1c3539e/ Log: close files opened by FileType option handling, there must be a better way? diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + import gc + # Force a collection which should close FileType() options + gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) From noreply at buildbot.pypy.org Fri May 30 16:08:38 2014 From: noreply at buildbot.pypy.org (ISF) Date: Fri, 30 May 2014 16:08:38 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Use the correct method for getting values at compile_loop_many_int_args Message-ID: <20140530140838.727321D25B2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r71786:f6256f2fac96 Date: 2014-05-30 14:08 +0000 http://bitbucket.org/pypy/pypy/changeset/f6256f2fac96/ Log: Use the correct method for getting values at compile_loop_many_int_args diff --git a/rpython/jit/backend/ppc/test/test_runner.py b/rpython/jit/backend/ppc/test/test_runner.py --- a/rpython/jit/backend/ppc/test/test_runner.py +++ b/rpython/jit/backend/ppc/test/test_runner.py @@ -56,8 +56,8 @@ ARGS = [lltype.Signed] * numargs RES = lltype.Signed args = [i+1 for i in range(numargs)] - res = self.cpu.execute_token(looptoken, *args) - assert self.cpu.get_latest_value_int(0) == sum(args) + deadframe = self.cpu.execute_token(looptoken, *args) + assert self.cpu.get_int_value(deadframe, 0) == sum(args) def test_return_spilled_args(self): numargs = 50 From noreply at buildbot.pypy.org Fri May 30 21:13:37 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Fri, 30 May 2014 21:13:37 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix windows' translation Message-ID: <20140530191337.9074C1C0109@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r71787:c8596183deb9 Date: 2014-05-30 12:12 -0700 http://bitbucket.org/pypy/pypy/changeset/c8596183deb9/ Log: fix windows' translation diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -328,7 +328,7 @@ buf[0] = '\0' else: try: - value = w_value.readbuf_w(space) + value = w_value.buffer_w(space, space.BUF_SIMPLE) except TypeError: raise oefmt(space.w_TypeError, "Objects of type '%T' can not be used as binary " From noreply at buildbot.pypy.org Sat May 31 16:31:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 May 2014 16:31:59 +0200 (CEST) Subject: [pypy-commit] cffi default: tweak for Windows (msvc versus mingw) Message-ID: <20140531143159.6D5BC1C0109@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1512:ee83f0ac370a Date: 2014-05-30 18:49 +0200 http://bitbucket.org/cffi/cffi/changeset/ee83f0ac370a/ Log: tweak for Windows (msvc versus mingw) diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -222,7 +222,7 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; -#if !defined(_MSC_VER) || _MSC_VER <= 1700 +#if defined(_MSC_VER) && _MSC_VER <= 1700 typedef unsigned char _Bool; #endif diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -780,7 +780,7 @@ typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; -#if !defined(_MSC_VER) || _MSC_VER <= 1700 +#if defined(_MSC_VER) && _MSC_VER <= 1700 typedef unsigned char _Bool; #endif #else diff --git a/cffi/vengine_gen.py 
b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -564,7 +564,9 @@ typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; typedef SSIZE_T ssize_t; +#if defined(_MSC_VER) && _MSC_VER <= 1700 typedef unsigned char _Bool; +#endif #else # include #endif From noreply at buildbot.pypy.org Sat May 31 19:22:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 May 2014 19:22:44 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix an obscure bug. (issue 1767) Message-ID: <20140531172244.B52011C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r71788:52395a782909 Date: 2014-05-31 19:22 +0200 http://bitbucket.org/pypy/pypy/changeset/52395a782909/ Log: Fix an obscure bug. (issue 1767) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -196,6 +196,16 @@ [history.ConstInt(really_wanted)]) mi.vrefs_after_residual_call() mi.vable_after_residual_call() + # + if not really_wanted: + # we're about the return ConstInt(0), which will go into the + # jitcode's %iN variable. But it will be captured by the + # GUARD_NOT_FORCED's resume data too. It is essential that we + # don't capture the old, stale value! Also, store ConstInt(1) + # to make sure that upon resuming we'll see a result of 1 (XXX + # unsure if it's needed, but it shouldn't hurt). 
+ self.make_result_of_lastop(ConstInt(1)) + # mi.generate_guard(rop.GUARD_NOT_FORCED, None) self.metainterp.heapcache.stm_break_done() From noreply at buildbot.pypy.org Sat May 31 19:23:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 31 May 2014 19:23:43 +0200 (CEST) Subject: [pypy-commit] pypy default: By default, use for the RPython-level 'str(instance)' a different Message-ID: <20140531172343.0DE041C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71789:2176b15be96f Date: 2014-05-31 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/2176b15be96f/ Log: By default, use for the RPython-level 'str(instance)' a different "address" number which corresponds to the id. That's needed to debug some kind of problems. diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -585,7 +585,11 @@ if not i: return rstr.null_str instance = cast_pointer(OBJECTPTR, i) - uid = r_uint(cast_ptr_to_int(i)) + # Two choices: the first gives a fast answer but it can change + # (typically only once) during the life of the object. 
+ #uid = r_uint(cast_ptr_to_int(i)) + uid = llop.gc_id(lltype.Unsigned, i) + # nameLen = len(instance.typeptr.name) nameString = rstr.mallocstr(nameLen-1) i = 0 From noreply at buildbot.pypy.org Sat May 31 22:29:29 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 31 May 2014 22:29:29 +0200 (CEST) Subject: [pypy-commit] pypy default: fix 2176b15be96f hopefully Message-ID: <20140531202929.9AB0B1C01DE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71790:fffc1c0969ff Date: 2014-05-31 21:28 +0100 http://bitbucket.org/pypy/pypy/changeset/fffc1c0969ff/ Log: fix 2176b15be96f hopefully diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -588,7 +588,7 @@ # Two choices: the first gives a fast answer but it can change # (typically only once) during the life of the object. #uid = r_uint(cast_ptr_to_int(i)) - uid = llop.gc_id(lltype.Unsigned, i) + uid = llop.gc_id(lltype.Signed, i) # nameLen = len(instance.typeptr.name) nameString = rstr.mallocstr(nameLen-1) From noreply at buildbot.pypy.org Sat May 31 22:58:48 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 31 May 2014 22:58:48 +0200 (CEST) Subject: [pypy-commit] pypy default: inline single-use function Message-ID: <20140531205848.AF89C1C02AF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r71791:d3305f62abe4 Date: 2014-05-31 21:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d3305f62abe4/ Log: inline single-use function diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -345,17 +345,6 @@ # ____________________________________________________________ -def inputdesc(reqtype, desc): - """Return a Constant for the given desc, of the requested type, - which can only be a Repr. 
- """ - assert isinstance(reqtype, Repr) - value = reqtype.convert_desc(desc) - lltype = reqtype.lowleveltype - c = Constant(value) - c.concretetype = lltype - return c - def inputconst(reqtype, value): """Return a Constant with the given value, of the requested type, which can be a Repr instance or a low-level type. diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -7,7 +7,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import typeOf, Void from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, - inputdesc, warning, impossible_repr) + warning, impossible_repr) from rpython.tool.pairtype import pair, pairtype @@ -495,7 +495,9 @@ frozendesc1 = r_pbc1.frozendesc access = frozendesc1.queryattrfamily() if access is r_pbc2.access_set: - return inputdesc(r_pbc2, frozendesc1) + value = r_pbc2.convert_desc(frozendesc1) + lltype = r_pbc2.lowleveltype + return Constant(value, lltype) return NotImplemented class __extend__(pairtype(AbstractMultipleUnrelatedFrozenPBCRepr,